diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index eaae1df5fe170..5b86025345d50 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -767,6 +767,18 @@ func TestCustomIRSA(t *testing.T) { runTestTerraformAWS(t) } +// TestCustomIRSA119 runs a simple k8s 1.19 configuration, but with some additional IAM roles for ServiceAccounts +func TestCustomIRSA119(t *testing.T) { + newIntegrationTest("minimal.example.com", "irsa119"). + withOIDCDiscovery(). + withServiceAccountRole("myserviceaccount.default", false). + withServiceAccountRole("myserviceaccount.test-wildcard", false). + withServiceAccountRole("myotherserviceaccount.myapp", true). + withKubeDNS(). + withAddons(dnsControllerAddon). + runTestTerraformAWS(t) +} + // TestClusterNameDigit runs a configuration with a cluster name beginning with a digit func TestClusterNameDigit(t *testing.T) { newIntegrationTest("123.example.com", "digit"). diff --git a/pkg/model/components/apiserver.go b/pkg/model/components/apiserver.go index 243058e8e9459..bd0a11b968e45 100644 --- a/pkg/model/components/apiserver.go +++ b/pkg/model/components/apiserver.go @@ -177,6 +177,16 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { } } + if c.FeatureGates == nil { + c.FeatureGates = make(map[string]string) + } + + if b.IsKubernetesLT("1.20") && clusterSpec.ServiceAccountIssuerDiscovery != nil && fi.BoolValue(&clusterSpec.ServiceAccountIssuerDiscovery.EnableAWSOIDCProvider) { + if _, found := c.FeatureGates["ServiceAccountIssuerDiscovery"]; !found { + c.FeatureGates["ServiceAccountIssuerDiscovery"] = "true" + } + } + return nil } diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_masters.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_masters.minimal.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_masters.minimal.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_myotherserviceaccount.myapp.sa.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myotherserviceaccount.myapp.sa.minimal.example.com_policy new file mode 100644 index 0000000000000..322edec81fe07 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myotherserviceaccount.myapp.sa.minimal.example.com_policy @@ -0,0 +1,17 @@ +{ + "Statement": [ + { + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "discovery.example.com/minimal.example.com:sub": "system:serviceaccount:myapp:myotherserviceaccount" + } + }, + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com" + } + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.default.sa.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.default.sa.minimal.example.com_policy new file mode 100644 index 0000000000000..dca857cc2bb41 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.default.sa.minimal.example.com_policy @@ -0,0 +1,17 @@ +{ + "Statement": [ + { + "Action": 
"sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "discovery.example.com/minimal.example.com:sub": "system:serviceaccount:default:myserviceaccount" + } + }, + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com" + } + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.test-wildcard.sa.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.test-wildcard.sa.minimal.example.com_policy new file mode 100644 index 0000000000000..dd5bf0d32c54a --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_myserviceaccount.test-wildcard.sa.minimal.example.com_policy @@ -0,0 +1,17 @@ +{ + "Statement": [ + { + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "discovery.example.com/minimal.example.com:sub": "system:serviceaccount:test-*:myserviceaccount" + } + }, + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com" + } + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_nodes.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_nodes.minimal.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_nodes.minimal.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_masters.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_masters.minimal.example.com_policy new file mode 100644 index 0000000000000..89ef0383cbd58 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_masters.minimal.example.com_policy @@ -0,0 +1,277 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + 
}, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + 
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_myotherserviceaccount.myapp.sa.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_myotherserviceaccount.myapp.sa.minimal.example.com_policy new file mode 100644 index 0000000000000..cd58ca06e93cc --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_myotherserviceaccount.myapp.sa.minimal.example.com_policy @@ -0,0 +1,23 @@ +{ + "Statement": [ + { + "Action": [ + "dynamodb:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "es:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_nodes.minimal.example.com_policy b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_nodes.minimal.example.com_policy new file mode 100644 index 0000000000000..49595fda9ade0 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_iam_role_policy_nodes.minimal.example.com_policy @@ -0,0 +1,42 @@ +{ + "Statement": [ + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/addons/*", + "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/cluster-completed.spec", + "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/igconfig/node/*", + "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/secrets/dockerconfig" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + 
"autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/irsa119/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/irsa119/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data b/tests/integration/update_cluster/irsa119/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data new file mode 100644 index 0000000000000..cccb0de326034 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data @@ -0,0 +1,258 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! 
validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: docker +containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info +docker: + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + storage: overlay2,overlay,aufs + version: 19.03.15 +encryptionConfig: null +etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + ServiceAccountIssuerDiscovery: "true" + image: k8s.gcr.io/kube-apiserver:v1.19.0 + insecurePort: 0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: minimal.example.com + configureCloudRoutes: false + image: k8s.gcr.io/kube-controller-manager:v1.19.0 + leaderElection: + leaderElect: true + logLevel: 2 + 
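
> Reviewer note: the `featureGates` block rendered into the master user data above is the effect of the `pkg/model/components/apiserver.go` hunk at the top of this diff — on Kubernetes releases before 1.20, `ServiceAccountIssuerDiscovery` is still feature-gated, so kops enables it when the AWS OIDC provider is requested, without overriding a gate the user set explicitly. Restated as a standalone sketch of the same logic:

```go
// defaultIssuerDiscoveryGate mirrors the apiserver.go change in this diff:
// enable the ServiceAccountIssuerDiscovery feature gate on pre-1.20 clusters
// that request the AWS OIDC provider, unless the user already set it.
func defaultIssuerDiscoveryGate(gates map[string]string, before120, awsOIDCEnabled bool) map[string]string {
	if gates == nil {
		gates = make(map[string]string)
	}
	if before120 && awsOIDCEnabled {
		if _, found := gates["ServiceAccountIssuerDiscovery"]; !found {
			gates["ServiceAccountIssuerDiscovery"] = "true"
		}
	}
	return gates
}
```
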
useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: k8s.gcr.io/kube-proxy:v1.19.0 + logLevel: 2 +kubeScheduler: + image: k8s.gcr.io/kube-scheduler:v1.19.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests +masterKubelet: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: false + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/minimal.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: Master +NodeupConfigHash: yugCig9gPASCPcVtJsR1ek4Kmq9sr1zhkcCKwOOpwUg= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/irsa119/data/aws_launch_template_nodes.minimal.example.com_user_data b/tests/integration/update_cluster/irsa119/data/aws_launch_template_nodes.minimal.example.com_user_data new file mode 100644 index 0000000000000..6172746a10795 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_launch_template_nodes.minimal.example.com_user_data @@ -0,0 +1,176 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: docker +containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info +docker: + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + storage: overlay2,overlay,aufs + version: 19.03.15 +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: k8s.gcr.io/kube-proxy:v1.19.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/minimal.example.com +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: ggQw8fU8hL+hHWwBNTs/MFZ3Ns2CjFOjegL65szcumY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git 
a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_cluster-completed.spec_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..d2ee6f06ab9d4 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_cluster-completed.spec_content @@ -0,0 +1,219 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/minimal.example.com + configStore: memfs://clusters.example.com/minimal.example.com + containerRuntime: docker + containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info + dnsZone: Z1AFAKE1ZON3YO + docker: + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + storage: overlay2,overlay,aufs + version: 19.03.15 + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + serviceAccountExternalPermissions: + - aws: + policyARNs: + - arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials + name: myserviceaccount + namespace: default + - aws: + policyARNs: + - arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials + name: myserviceaccount + namespace: test-* + - aws: + inlinePolicy: | + [ + { + "Effect": "Allow", + "Action": ["dynamodb:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": ["es:*"], + "Resource": ["*"] + } + ] + name: myotherserviceaccount + namespace: myapp + keyStore: memfs://clusters.example.com/minimal.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + ServiceAccountIssuerDiscovery: "true" + image: k8s.gcr.io/kube-apiserver:v1.19.0 + insecurePort: 0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + 
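
> Reviewer note: the `serviceAccountExternalPermissions` entries in the cluster spec above are what produce the per-ServiceAccount IAM roles in this fixture set. Judging by the fixture file names, the roles follow a `<name>.<namespace>.sa.<clusterName>` pattern, with the literal namespace `test-*` rendered as `test-wildcard`. A hypothetical illustration of that naming, inferred from the fixtures rather than taken from the kops source:

```go
// iamRoleNameForServiceAccount reproduces the naming visible in the fixture
// files, e.g. "myserviceaccount.default.sa.minimal.example.com". The
// "wildcard" substitution is inferred from the test-wildcard fixture name.
func iamRoleNameForServiceAccount(name, namespace, clusterName string) string {
	namespace = strings.ReplaceAll(namespace, "*", "wildcard")
	return fmt.Sprintf("%s.%s.sa.%s", name, namespace, clusterName)
}
```
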
cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: minimal.example.com + configureCloudRoutes: false + image: k8s.gcr.io/kube-controller-manager:v1.19.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: k8s.gcr.io/dns/k8s-dns-node-cache:1.21.3 + memoryRequest: 5Mi + provider: KubeDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: k8s.gcr.io/kube-proxy:v1.19.0 + logLevel: 2 + kubeScheduler: + image: k8s.gcr.io/kube-scheduler:v1.19.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.19.0 + masterInternalName: api.internal.minimal.example.com + masterKubelet: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: false + masterPublicName: api.minimal.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/minimal.example.com/secrets + serviceAccountIssuerDiscovery: + additionalAudiences: + - sts.amazonaws.com + discoveryStore: memfs://discovery.example.com/minimal.example.com + enableAWSOIDCProvider: true + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_discovery.json_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_discovery.json_content new file mode 100644 index 0000000000000..aba05dfd1a0c4 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_discovery.json_content @@ -0,0 +1,18 @@ +{ +"issuer": "https://discovery.example.com/minimal.example.com", +"jwks_uri": "https://discovery.example.com/minimal.example.com/openid/v1/jwks", +"authorization_endpoint": "urn:kubernetes:programmatic_authorization", +"response_types_supported": [ +"id_token" +], +"subject_types_supported": [ +"public" +], +"id_token_signing_alg_values_supported": [ +"RS256" +], +"claims_supported": [ +"sub", +"iss" +] +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-events_content new file mode 100644 index 
0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_keys.json_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_keys.json_content new file mode 100644 index 0000000000000..ddcbc6ed75733 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_keys.json_content @@ -0,0 +1,20 @@ +{ +"keys": [ +{ +"use": "sig", +"kty": "RSA", +"kid": "3mNcULfgtWECYyZWY5ow1rOHjiRwEZHx28HQcRec3Ew", +"alg": "RS256", +"n": "2JbeF8dNwqfEKKD65aGlVs58fWkA0qZdVLKw8qATzRBJTi1nqbj2kAR4gyy_C8Mxouxva_om9d7Sq8Ka55T7-w", +"e": "AQAB" +}, +{ +"use": "sig", +"kty": "RSA", +"kid": "G-cZ10iKJqrXhR15ivI7Lg2q_cuL0zN9ouL0vF67FLc", +"alg": "RS256", +"n": "o4Tridlsf4Yz3UAiup_scSTiG_OqxkUW3Fz7zGKvVcLeYj9GEIKuzoB1VFk1nboDq4cCuGLfdzaQdCQKPIsDuw", +"e": "AQAB" +} +] +} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_kops-version.txt_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-events_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-events_content new file mode 100644 index 0000000000000..b826912330d83 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-events_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.minimal.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1 + image: k8s.gcr.io/etcdadm/etcd-manager:v3.0.20220203 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: 
system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-main_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-main_content new file mode 100644 index 0000000000000..8b9bdce19a713 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-etcdmanager-main_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.minimal.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1 + image: k8s.gcr.io/etcdadm/etcd-manager:v3.0.20220203 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..a30b6f5a1d931 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + command: + - /kube-apiserver-healthcheck + image: k8s.gcr.io/kops/kube-apiserver-healthcheck:1.24.0-alpha.2 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: 
/etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..6f6b8fd5f955f --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content @@ -0,0 +1,54 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 372d72a7fa6ef1fc24cf6e3719f5790dc4e3652ecf3c7b2f5ff8baf9075bbb44 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - manifest: core.addons.k8s.io/v1.4.0.yaml + manifestHash: 18233793a8442224d052e44891e737c67ccfb4e051e95216392319653f4cb0e5 + name: core.addons.k8s.io + selector: + k8s-addon: core.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml + manifestHash: 86f35e6bc4ffa375038449e4fba4b7c9c7d7aa731d3713b7103389d08661a72c + name: kube-dns.addons.k8s.io + selector: + k8s-addon: kube-dns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: ac3d2238b9a073b6d2e28d1668aabb35704ad01e96d586147006e074c8efb0f8 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content new file mode 100644 index 0000000000000..a334917278a4b --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: core.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: core.addons.k8s.io + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: core.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: core.addons.k8s.io + name: kube-dns + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: core.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: 
core.addons.k8s.io + name: kube-proxy + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: core.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: core.addons.k8s.io + name: kubeadm:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- apiGroup: "" + kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..3b626ad0ed22b --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,127 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.24.0-alpha.2 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.24.0-alpha.2 + spec: + containers: + - command: + - /dns-controller + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: k8s.gcr.io/kops/dns-controller:1.24.0-alpha.2 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..d866052db0916 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,208 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/minimal.example.com","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.24.0-alpha.2 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.24.0-alpha.2 + spec: + containers: + - command: + - /kops-controller + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: k8s.gcr.io/kops/kops-controller:1.24.0-alpha.2 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/master: "" + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + 
namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..94f90bbdb799b --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,327 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + k8s-app: kube-dns-autoscaler + kubernetes.io/cluster-service: "true" + name: kube-dns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns-autoscaler + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: kube-dns-autoscaler + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + - --target=Deployment/kube-dns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3 + name: autoscaler + resources: + requests: + 
cpu: 20m + memory: 10Mi + priorityClassName: system-cluster-critical + serviceAccountName: kube-dns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + name: kube-dns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + template: + metadata: + annotations: + prometheus.io/port: "10055" + prometheus.io/scrape: "true" + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: kube-dns + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - kube-dns + topologyKey: kubernetes.io/hostname + weight: 1 + containers: + - args: + - --config-dir=/kube-dns-config + - --dns-port=10053 + - --domain=cluster.local. + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + image: k8s.gcr.io/k8s-dns-kube-dns:1.15.13 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: kubedns + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + initialDelaySeconds: 3 + timeoutSeconds: 5 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + volumeMounts: + - mountPath: /kube-dns-config + name: kube-dns-config + - args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --dns-forward-max=150 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/in6.arpa/127.0.0.1#10053 + - --min-port=1024 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: dnsmasq + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - mountPath: /etc/k8s/dns/dnsmasq-nanny + name: kube-dns-config + - args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A + image: k8s.gcr.io/k8s-dns-sidecar:1.15.13 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: sidecar + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + cpu: 10m + memory: 20Mi + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: kube-dns + volumes: + - configMap: + name: kube-dns + optional: true + name: kube-dns-config + +--- + +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + 
addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: KubeDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + name: kube-dns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + name: kube-dns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + name: kube-dns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-dns-autoscaler +subjects: +- kind: ServiceAccount + name: kube-dns-autoscaler + namespace: kube-system + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kube-dns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kube-dns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ 
b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..21efd54326518 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..c097c4d6c311f --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,264 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + 
anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + ServiceAccountIssuerDiscovery: "true" + image: k8s.gcr.io/kube-apiserver:v1.19.0 + insecurePort: 0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://discovery.example.com/minimal.example.com + serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 3f03e5c160a8b658d30b34824a1c00abadbac96e62c4d01bf5c9271a2debc3ab@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubelet + - 79bb0d2f05487ff533999a639c075043c70a0a1ba25c1629eb1eef6ebe3ba70f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 5504d190eef37355231325c176686d51ade6e0cabe2da526d561a38d8611506f@https://download.docker.com/linux/static/stable/x86_64/docker-19.03.15.tgz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubelet + - d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 264f3396630507606a8646fda6a28a98d3ced8927df84be8ee9a74ab73cc1566@https://download.docker.com/linux/static/stable/aarch64/docker-19.03.15.tgz + - 
2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN 
CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + 
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: minimal.example.com +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: false +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml +etcdManifests: +- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml +- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-nodes_content 
b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..d071babab4ab5 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/data/aws_s3_bucket_object_nodeupconfig-nodes_content @@ -0,0 +1,58 @@ +Assets: + amd64: + - 3f03e5c160a8b658d30b34824a1c00abadbac96e62c4d01bf5c9271a2debc3ab@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubelet + - 79bb0d2f05487ff533999a639c075043c70a0a1ba25c1629eb1eef6ebe3ba70f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 5504d190eef37355231325c176686d51ade6e0cabe2da526d561a38d8611506f@https://download.docker.com/linux/static/stable/x86_64/docker-19.03.15.tgz + arm64: + - d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubelet + - d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 264f3396630507606a8646fda6a28a98d3ced8927df84be8ee9a74ab73cc1566@https://download.docker.com/linux/static/stable/aarch64/docker-19.03.15.tgz +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: minimal.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: k8s.gcr.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml diff --git 
a/tests/integration/update_cluster/irsa119/id_rsa.pub b/tests/integration/update_cluster/irsa119/id_rsa.pub new file mode 100755 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/irsa119/in-v1alpha2.yaml b/tests/integration/update_cluster/irsa119/in-v1alpha2.yaml new file mode 100644 index 0000000000000..bd19ab9764a36 --- /dev/null +++ b/tests/integration/update_cluster/irsa119/in-v1alpha2.yaml @@ -0,0 +1,110 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal.example.com +spec: + kubernetesApiAccess: + - 0.0.0.0/0 + channel: stable + cloudProvider: aws + configBase: memfs://clusters.example.com/minimal.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + iam: + serviceAccountExternalPermissions: + - name: myserviceaccount + namespace: default + aws: + policyARNs: + - arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials + - name: myserviceaccount + namespace: test-* + aws: + policyARNs: + - arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials + - name: myotherserviceaccount + namespace: myapp + aws: + inlinePolicy: | + [ + { + "Effect": "Allow", + "Action": ["dynamodb:*"], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": ["es:*"], + "Resource": ["*"] + } + ] + kubelet: + anonymousAuth: false + kubernetesVersion: v1.19.0 + masterInternalName: api.internal.minimal.example.com + masterPublicName: api.minimal.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + serviceAccountIssuerDiscovery: + enableAWSOIDCProvider: true + discoveryStore: memfs://discovery.example.com/minimal.example.com + additionalAudiences: + - sts.amazonaws.com + sshAccess: + - 0.0.0.0/0 + topology: + masters: public + nodes: public + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: nodes + labels: + kops.k8s.io/cluster: minimal.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: minimal.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a diff --git a/tests/integration/update_cluster/irsa119/kubernetes.tf b/tests/integration/update_cluster/irsa119/kubernetes.tf new file mode 100644 index 0000000000000..59f6ec30106de --- /dev/null +++ b/tests/integration/update_cluster/irsa119/kubernetes.tf @@ -0,0 +1,939 @@ +locals { + cluster_name = "minimal.example.com" + default-myserviceaccount_role_arn = aws_iam_role.myserviceaccount-default-sa-minimal-example-com.arn + 
default-myserviceaccount_role_name = aws_iam_role.myserviceaccount-default-sa-minimal-example-com.name + iam_openid_connect_provider_arn = aws_iam_openid_connect_provider.minimal-example-com.arn + iam_openid_connect_provider_issuer = "discovery.example.com/minimal.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id] + master_security_group_ids = [aws_security_group.masters-minimal-example-com.id] + masters_role_arn = aws_iam_role.masters-minimal-example-com.arn + masters_role_name = aws_iam_role.masters-minimal-example-com.name + myapp-myotherserviceaccount_role_arn = aws_iam_role.myotherserviceaccount-myapp-sa-minimal-example-com.arn + myapp-myotherserviceaccount_role_name = aws_iam_role.myotherserviceaccount-myapp-sa-minimal-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-example-com.id] + node_security_group_ids = [aws_security_group.nodes-minimal-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-minimal-example-com.id] + nodes_role_arn = aws_iam_role.nodes-minimal-example-com.arn + nodes_role_name = aws_iam_role.nodes-minimal-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.minimal-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-example-com.id + test-wildcard-myserviceaccount_role_arn = aws_iam_role.myserviceaccount-test-wildcard-sa-minimal-example-com.arn + test-wildcard-myserviceaccount_role_name = aws_iam_role.myserviceaccount-test-wildcard-sa-minimal-example-com.name + vpc_cidr_block = aws_vpc.minimal-example-com.cidr_block + vpc_id = aws_vpc.minimal-example-com.id +} + +output "cluster_name" { + value = "minimal.example.com" +} + +output "default-myserviceaccount_role_arn" { + value = aws_iam_role.myserviceaccount-default-sa-minimal-example-com.arn +} + +output "default-myserviceaccount_role_name" { + value = aws_iam_role.myserviceaccount-default-sa-minimal-example-com.name +} + +output "iam_openid_connect_provider_arn" { + value = aws_iam_openid_connect_provider.minimal-example-com.arn +} + +output "iam_openid_connect_provider_issuer" { + value = "discovery.example.com/minimal.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-minimal-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-minimal-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-minimal-example-com.name +} + +output "myapp-myotherserviceaccount_role_arn" { + value = aws_iam_role.myotherserviceaccount-myapp-sa-minimal-example-com.arn +} + +output "myapp-myotherserviceaccount_role_name" { + value = aws_iam_role.myotherserviceaccount-myapp-sa-minimal-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-minimal-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-minimal-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-minimal-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-minimal-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-minimal-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.minimal-example-com.id +} + +output "subnet_us-test-1a_id" { + value = 
aws_subnet.us-test-1a-minimal-example-com.id +} + +output "test-wildcard-myserviceaccount_role_arn" { + value = aws_iam_role.myserviceaccount-test-wildcard-sa-minimal-example-com.arn +} + +output "test-wildcard-myserviceaccount_role_name" { + value = aws_iam_role.myserviceaccount-test-wildcard-sa-minimal-example-com.name +} + +output "vpc_cidr_block" { + value = aws_vpc.minimal-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.minimal-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-minimal-example-com.id + version = aws_launch_template.master-us-test-1a-masters-minimal-example-com.latest_version + } + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.minimal.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.minimal.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/minimal.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-minimal-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-minimal-example-com.id + version = aws_launch_template.nodes-minimal-example-com.latest_version + } + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.minimal.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.minimal.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + 
value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/minimal.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "us-test-1a.etcd-events.minimal.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "us-test-1a.etcd-main.minimal.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-minimal-example-com" { + name = "masters.minimal.example.com" + role = aws_iam_role.masters-minimal-example-com.name + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "masters.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-minimal-example-com" { + name = "nodes.minimal.example.com" + role = aws_iam_role.nodes-minimal-example-com.name + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_openid_connect_provider" "minimal-example-com" { + client_id_list = ["amazonaws.com", "sts.amazonaws.com"] + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + thumbprint_list = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280", "a9d53002e97e00e043244f3d170d6f4c414104fd"] + url = "https://discovery.example.com/minimal.example.com" +} + +resource "aws_iam_role" "masters-minimal-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.example.com_policy") + name = "masters.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "masters.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_role" "myotherserviceaccount-myapp-sa-minimal-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_myotherserviceaccount.myapp.sa.minimal.example.com_policy") + name = "myotherserviceaccount.myapp.sa.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "myotherserviceaccount.myapp.sa.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_role" "myserviceaccount-default-sa-minimal-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_myserviceaccount.default.sa.minimal.example.com_policy") + name = "myserviceaccount.default.sa.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "myserviceaccount.default.sa.minimal.example.com" + 
"kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_role" "myserviceaccount-test-wildcard-sa-minimal-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_myserviceaccount.test-wildcard.sa.minimal.example.com_policy") + name = "myserviceaccount.test-wildcard.sa.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "myserviceaccount.test-wildcard.sa.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-minimal-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.example.com_policy") + name = "nodes.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-minimal-example-com" { + name = "masters.minimal.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.example.com_policy") + role = aws_iam_role.masters-minimal-example-com.name +} + +resource "aws_iam_role_policy" "myotherserviceaccount-myapp-sa-minimal-example-com" { + name = "myotherserviceaccount.myapp.sa.minimal.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_myotherserviceaccount.myapp.sa.minimal.example.com_policy") + role = aws_iam_role.myotherserviceaccount-myapp-sa-minimal-example-com.name +} + +resource "aws_iam_role_policy" "nodes-minimal-example-com" { + name = "nodes.minimal.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.example.com_policy") + role = aws_iam_role.nodes-minimal-example-com.name +} + +resource "aws_iam_role_policy_attachment" "external-myserviceaccount-default-sa-minimal-example-com-3197825879" { + policy_arn = "arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials" + role = aws_iam_role.myserviceaccount-default-sa-minimal-example-com.name +} + +resource "aws_iam_role_policy_attachment" "external-myserviceaccount-test-wildcard-sa-minimal-example-com-3197825879" { + policy_arn = "arn:aws-test:iam::123456789012:policy/UsersManageOwnCredentials" + role = aws_iam_role.myserviceaccount-test-wildcard-sa-minimal-example-com.name +} + +resource "aws_internet_gateway" "minimal-example-com" { + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-example-com.id +} + +resource "aws_key_pair" "kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-minimal-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = 
aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.minimal.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-minimal-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "master-us-test-1a.masters.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "master-us-test-1a.masters.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "master-us-test-1a.masters.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data") +} + +resource "aws_launch_template" "nodes-minimal-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-minimal-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + 
http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.minimal.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-minimal-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.minimal-example-com.id + route_table_id = aws_route_table.minimal-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.minimal-example-com.id + route_table_id = aws_route_table.minimal-example-com.id +} + +resource "aws_route_table" "minimal-example-com" { + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.minimal-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-minimal-example-com" { + route_table_id = aws_route_table.minimal-example-com.id + subnet_id = aws_subnet.us-test-1a-minimal-example-com.id +} + +resource "aws_s3_bucket_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_cluster-completed.spec_content") + key = "clusters.example.com/minimal.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "discovery-json" { + acl = "public-read" + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_discovery.json_content") + key = "discovery.example.com/minimal.example.com/.well-known/openid-configuration" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec" 
+ provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "keys-json" { + acl = "public-read" + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_keys.json_content") + key = "discovery.example.com/minimal.example.com/openid/v1/jwks" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_kops-version.txt_content") + key = "clusters.example.com/minimal.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "manifests-etcdmanager-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-events_content") + key = "clusters.example.com/minimal.example.com/manifests/etcd/events.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "manifests-etcdmanager-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-main_content") + key = "clusters.example.com/minimal.example.com/manifests/etcd/main.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content") + key = "clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-core-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content") + key = "clusters.example.com/minimal.example.com/addons/core.addons.k8s.io/v1.4.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = 
"clusters.example.com/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-kube-dns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/minimal.example.com/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "minimal-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/minimal.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/minimal.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_bucket_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-nodes_content") + key = "clusters.example.com/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-minimal-example-com" { + description = "Security group for masters" + name = "masters.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "masters.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-example-com.id +} + +resource "aws_security_group" "nodes-minimal-example-com" { + description = "Security group for nodes" + name = "nodes.minimal.example.com" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "nodes.minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" 
"from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-masters-minimal-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-example-com.id + source_security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-example-com.id + source_security_group_id = aws_security_group.masters-minimal-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-1to2379-masters-minimal-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-2382to4000-masters-minimal-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-4003to65535-masters-minimal-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-example-com.id + 
source_security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1to65535-masters-minimal-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-minimal-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-minimal-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "us-test-1a.minimal.example.com" + "SubnetType" = "Public" + "kubernetes.io/cluster/minimal.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.minimal-example-com.id +} + +resource "aws_vpc" "minimal-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "minimal-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "minimal.example.com" + "Name" = "minimal.example.com" + "kubernetes.io/cluster/minimal.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "minimal-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.minimal-example-com.id + vpc_id = aws_vpc.minimal-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 3.71.0" + } + } +}
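
The generated module stops at the IAM side of IRSA: it creates the OIDC provider and the per-ServiceAccount roles, and exports their ARNs as locals and outputs, but the ServiceAccounts themselves still need the matching role-ARN annotation before a pod-identity webhook (if deployed) will inject AWS credentials into their pods. A minimal consumer sketch, not part of the fixture, assuming kubeconfig access to the cluster, the hashicorp/kubernetes provider, and the standard `eks.amazonaws.com/role-arn` annotation key:

# Hypothetical companion config (would live alongside kubernetes.tf in the
# same module, so it can read the local.*_role_arn values defined above).
provider "kubernetes" {
  config_path = "~/.kube/config" # assumption: kubeconfig for minimal.example.com
}

# Patch the role ARN onto the existing ServiceAccount so a pod-identity
# webhook can mutate its pods with the corresponding AWS credentials.
resource "kubernetes_annotations" "myotherserviceaccount-irsa" {
  api_version = "v1"
  kind        = "ServiceAccount"
  metadata {
    name      = "myotherserviceaccount"
    namespace = "myapp"
  }
  annotations = {
    "eks.amazonaws.com/role-arn" = local.myapp-myotherserviceaccount_role_arn
  }
}

Using `kubernetes_annotations` rather than `kubernetes_service_account` keeps the ServiceAccount itself owned by the application; Terraform manages only the annotation.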