From e3d562bcdb0da3bc31c079907c9e04e0c1a43c68 Mon Sep 17 00:00:00 2001 From: Rajitha Perera Date: Thu, 20 Sep 2018 10:31:28 -0400 Subject: [PATCH] Support for AWS cloud-config (#1465) * Support for AWS cloud-config * Update docs * Fix version incompatibilities * Do not use shorthand `default` * Add new cloud config variable, roleArn --- docs/aws.md | 20 ++++++++++++++++++- .../manifests/kube-apiserver.manifest.j2 | 4 +--- .../kube-controller-manager.manifest.j2 | 12 +++++------ roles/kubernetes/node/tasks/main.yml | 2 +- .../node/templates/aws-cloud-config.j2 | 17 ++++++++++++++++ .../node/templates/kubelet.kubeadm.env.j2 | 6 ++---- .../node/templates/kubelet.standard.env.j2 | 6 ++---- 7 files changed, 47 insertions(+), 20 deletions(-) create mode 100644 roles/kubernetes/node/templates/aws-cloud-config.j2 diff --git a/docs/aws.md b/docs/aws.md index 28cdc89deb2..119c3a5938f 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -1,7 +1,7 @@ AWS =============== -To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. +To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider. Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. @@ -58,3 +58,21 @@ export AWS_SECRET_ACCESS_KEY="yyyyy" export REGION="us-east-2" ``` - We will now create our cluster. 
There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` + +## Kubespray configuration + +Declare the cloud config variables for the `aws` provider as follows. Setting these variables is optional and depends on your use case. + +Variable|Type|Comment +---|---|--- +aws_zone|string|Force set the AWS zone. Recommended to leave blank. +aws_vpc|string|The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set, the KubernetesClusterTag must also be provided +aws_subnet_id|string|SubnetID enables using a specific subnet to use for ELB's +aws_route_table_id|string|RouteTableID enables using a specific RouteTable +aws_role_arn|string|RoleARN is the IAM role to assume when interacting with AWS APIs +aws_kubernetes_cluster_tag|string|KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources +aws_kubernetes_cluster_id|string|KubernetesClusterID is the cluster id we'll use to identify our cluster resources +aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has set up a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000. 
+aws_elb_security_group|string|Only in Kubelet version >= 1.7: AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set, instead of creating a new Security group for each ELB, this security group will be used instead. +aws_disable_strict_zone_check|bool|During the instantiation of a new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS-like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment. + diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index e1023d088fd..a157fec62db 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -116,11 +116,9 @@ spec: {% endif %} - --v={{ kube_log_level }} - --allow-privileged=true -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %} - --cloud-provider={{ cloud_provider }} - --cloud-config={{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined and cloud_provider == "aws" %} - - --cloud-provider={{ cloud_provider }} {% endif %} {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - --anonymous-auth={{ kube_api_anonymous_auth }} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index ac7dbd2ecd9..674c1a4cf38 100644 --- 
a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -43,13 +43,11 @@ spec: {% if rbac_enabled %} - --use-service-account-credentials=true {% endif %} -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %} - --cloud-provider={{cloud_provider}} - --cloud-config={{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} - - --cloud-provider={{cloud_provider}} -{% elif cloud_provider is defined and cloud_provider == "oci" %} - - --cloud_provider=external +{% elif cloud_provider is defined and cloud_provider in ["external", "oci"] %} + - --cloud-provider=external {% endif %} {% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} - --configure-cloud-routes=true @@ -92,7 +90,7 @@ spec: - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" name: kubeconfig readOnly: true -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %} - mountPath: "{{ kube_config_dir }}/cloud_config" name: cloudconfig readOnly: true @@ -117,7 +115,7 @@ spec: - name: kubeconfig hostPath: path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %} - hostPath: path: "{{ kube_config_dir }}/cloud_config" name: cloudconfig diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index db9cbd3090f..785849f20a8 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ 
b/roles/kubernetes/node/tasks/main.yml @@ -189,7 +189,7 @@ mode: 0640 when: - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws' ] notify: restart kubelet tags: - cloud-provider diff --git a/roles/kubernetes/node/templates/aws-cloud-config.j2 b/roles/kubernetes/node/templates/aws-cloud-config.j2 new file mode 100644 index 00000000000..918ab310073 --- /dev/null +++ b/roles/kubernetes/node/templates/aws-cloud-config.j2 @@ -0,0 +1,17 @@ +[Global] +{% if kube_version | version_compare('v1.6', '>=') %} +zone={{ aws_zone|default("") }} +vpc={{ aws_vpc|default("") }} +subnetId={{ aws_subnet_id|default("") }} +routeTableId={{ aws_route_table_id|default("") }} +{% if kube_version | version_compare('v1.10', '>=') %} +roleArn={{ aws_role_arn|default("") }} +{% endif %} +kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }} +kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }} +disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }} +disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }} +{% if kube_version | version_compare('v1.7', '>=') %} +elbSecurityGroup={{ aws_elb_security_group|default("") }} +{% endif %} +{% endif %} diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index 385102f48cc..a1282a03cdb 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -102,11 +102,9 @@ KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kuben {% endif %} # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=true" -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +{% if cloud_provider is defined and cloud_provider in 
["openstack", "azure", "vsphere", "aws"] %} KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config" -{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} -KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}" -{% elif cloud_provider is defined and cloud_provider == "oci" %} +{% elif cloud_provider is defined and cloud_provider in ["oci", "external"] %} KUBELET_CLOUDPROVIDER="--cloud-provider=external" {% else %} KUBELET_CLOUDPROVIDER="" diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 5686b29e164..a01e2f8f89d 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -134,13 +134,11 @@ KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }} # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=true" -{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere"] %} +{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere", "aws"] %} KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config" {% elif cloud_provider is defined and cloud_provider in ["azure"] %} KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config --azure-container-registry-config={{ kube_config_dir }}/cloud_config" -{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} -KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}" -{% elif cloud_provider is defined and cloud_provider == "oci" %} +{% elif cloud_provider is defined and cloud_provider in ["oci", "external"] %} KUBELET_CLOUDPROVIDER="--cloud-provider=external" {% else %} KUBELET_CLOUDPROVIDER=""