EKS - Terraform #113

Merged: 5 commits, May 17, 2021
39 changes: 39 additions & 0 deletions aws/eks/eks-terraform-scripts/README.md
@@ -0,0 +1,39 @@
# EKS - Terraform

## Requirements

Before running the Terraform scripts, set up the [IAM required permissions](./iam-required-permissions.md). If you copy/paste the policy, remove the comments first, since they are not valid JSON. Remember that it is a best practice to use a role and attach these permissions as a managed policy rather than as an inline policy.
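
As a rough sketch, the policy can be turned into a customer-managed policy and attached to a role with the AWS CLI. The policy name, role name, account ID, and file path below are placeholders; the JSON must have its comments stripped first:

```shell
# Create a customer-managed policy from the stripped-down JSON
# (copied from iam-required-permissions.md with the // comments removed).
aws iam create-policy \
  --policy-name eks-terraform-minimum-permissions \
  --policy-document file://eks-terraform-policy.json

# Attach the managed policy to the role that will run Terraform.
aws iam attach-role-policy \
  --role-name eks-terraform-provisioner \
  --policy-arn arn:aws:iam::123456789012:policy/eks-terraform-minimum-permissions
```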

## Instructions

1. Make sure you have the following installed and configured:
   - AWS CLI
   - AWS IAM Authenticator
   - kubectl
   - wget
2. Set your working directory to **eks-terraform-scripts**.
3. Execute:
   ```shell
   terraform init
   ```
4. Execute:
   ```shell
   terraform apply
   ```
5. When prompted, confirm the apply by typing:
   ```shell
   yes
   ```
6. At this point you can configure **kubectl**:
   ```shell
   aws eks --region $(terraform output -raw region) update-kubeconfig --name $(terraform output -raw cluster_name)
   ```
7. The cluster is now ready for you to work with; what you do next is up to you (see the verification sketch after this list).
8. When you are done, clean up:
   ```shell
   terraform destroy
   ```
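
As a quick sanity check after step 6, the sketch below verifies that **kubectl** can reach the new cluster and optionally applies the `kubernetes-admin.rbac.yaml` manifest shipped in this directory; run it before tearing anything down:

```shell
# List the worker nodes to confirm kubectl is talking to the new cluster.
kubectl get nodes -o wide

# Optional: create the cluster-admin service account defined in this directory.
kubectl apply -f kubernetes-admin.rbac.yaml

# Review the values Terraform exported for the cluster.
terraform output
```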
50 changes: 50 additions & 0 deletions aws/eks/eks-terraform-scripts/eks-cluster.tf
@@ -0,0 +1,50 @@
resource "aws_kms_key" "eks" {
  description = "EKS secret encryption key."
}

module "eks" {
  source          = "terraform-aws-modules/eks/aws"
  cluster_name    = local.cluster_name
  cluster_version = "1.18"
  subnets         = module.vpc.private_subnets

  # Encrypt Kubernetes secrets at rest with the KMS key defined above.
  cluster_encryption_config = [
    {
      provider_key_arn = aws_kms_key.eks.arn
      resources        = ["secrets"]
    }
  ]

  tags = {
    "environment" = "cloud-service-certification"
  }

  vpc_id = module.vpc.vpc_id

  workers_group_defaults = {
    root_volume_type = "gp2"
  }

  # Two self-managed worker groups, each with its own additional security group.
  worker_groups = [
    {
      name                          = "worker-group-1"
      instance_type                 = "t2.small"
      asg_desired_capacity          = 2
      additional_security_group_ids = [aws_security_group.worker_group_one.id]
    },
    {
      name                          = "worker-group-2"
      instance_type                 = "t2.medium"
      asg_desired_capacity          = 1
      additional_security_group_ids = [aws_security_group.worker_group_two.id]
    },
  ]
}

data "aws_eks_cluster" "cluster" {
  name = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}
147 changes: 147 additions & 0 deletions aws/eks/eks-terraform-scripts/iam-required-permissions.md
@@ -0,0 +1,147 @@
# EKS - Terraform - IAM Required Permissions

The following policy contains the minimum IAM permissions your IAM user or role needs to create an EKS cluster with the Terraform scripts provided.

```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "autoscaling:AttachInstances",
                "autoscaling:CreateAutoScalingGroup",
                "autoscaling:CreateLaunchConfiguration",
                "autoscaling:CreateOrUpdateTags",
                "autoscaling:DeleteAutoScalingGroup",
                "autoscaling:DeleteLaunchConfiguration",
                "autoscaling:DeleteTags",
                "autoscaling:Describe*",
                "autoscaling:DetachInstances",
                "autoscaling:SetDesiredCapacity",
                "autoscaling:UpdateAutoScalingGroup",
                "autoscaling:SuspendProcesses",
                "ec2:AllocateAddress",
                "ec2:AssignPrivateIpAddresses",
                "ec2:Associate*",
                "ec2:AttachInternetGateway",
                "ec2:AttachNetworkInterface",
                "ec2:AuthorizeSecurityGroupEgress",
                "ec2:AuthorizeSecurityGroupIngress",
                "ec2:CreateDefaultSubnet",
                "ec2:CreateDhcpOptions",
                "ec2:CreateEgressOnlyInternetGateway",
                "ec2:CreateInternetGateway",
                "ec2:CreateNatGateway",
                "ec2:CreateNetworkInterface",
                "ec2:CreateRoute",
                "ec2:CreateRouteTable",
                "ec2:CreateSecurityGroup",
                "ec2:CreateSubnet",
                "ec2:CreateTags",
                "ec2:CreateVolume",
                "ec2:CreateVpc",
                "ec2:CreateVpcEndpoint",
                "ec2:DeleteDhcpOptions",
                "ec2:DeleteEgressOnlyInternetGateway",
                "ec2:DeleteInternetGateway",
                "ec2:DeleteNatGateway",
                "ec2:DeleteNetworkInterface",
                "ec2:DeleteRoute",
                "ec2:DeleteRouteTable",
                "ec2:DeleteSecurityGroup",
                "ec2:DeleteSubnet",
                "ec2:DeleteTags",
                "ec2:DeleteVolume",
                "ec2:DeleteVpc",
                "ec2:DeleteVpnGateway",
                "ec2:Describe*",
                "ec2:DetachInternetGateway",
                "ec2:DetachNetworkInterface",
                "ec2:DetachVolume",
                "ec2:Disassociate*",
                "ec2:ModifySubnetAttribute",
                "ec2:ModifyVpcAttribute",
                "ec2:ModifyVpcEndpoint",
                "ec2:ReleaseAddress",
                "ec2:RevokeSecurityGroupEgress",
                "ec2:RevokeSecurityGroupIngress",
                "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
                "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
                "ec2:CreateLaunchTemplate",
                "ec2:CreateLaunchTemplateVersion",
                "ec2:DeleteLaunchTemplate",
                "ec2:DeleteLaunchTemplateVersions",
                "ec2:DescribeLaunchTemplates",
                "ec2:DescribeLaunchTemplateVersions",
                "ec2:GetLaunchTemplateData",
                "ec2:ModifyLaunchTemplate",
                "ec2:RunInstances",
                "eks:CreateCluster",
                "eks:DeleteCluster",
                "eks:DescribeCluster",
                "eks:ListClusters",
                "eks:UpdateClusterConfig",
                "eks:UpdateClusterVersion",
                "eks:DescribeUpdate",
                "eks:TagResource",
                "eks:UntagResource",
                "eks:ListTagsForResource",
                "eks:CreateFargateProfile",
                "eks:DeleteFargateProfile",
                "eks:DescribeFargateProfile",
                "eks:ListFargateProfiles",
                "eks:CreateNodegroup",
                "eks:DeleteNodegroup",
                "eks:DescribeNodegroup",
                "eks:ListNodegroups",
                "eks:UpdateNodegroupConfig",
                "eks:UpdateNodegroupVersion",
                "iam:AddRoleToInstanceProfile",
                "iam:AttachRolePolicy",
                "iam:CreateInstanceProfile",
                "iam:CreateOpenIDConnectProvider",
                "iam:CreateServiceLinkedRole",
                "iam:CreatePolicy",
                "iam:CreatePolicyVersion",
                "iam:CreateRole",
                "iam:DeleteInstanceProfile",
                "iam:DeleteOpenIDConnectProvider",
                "iam:DeletePolicy",
                "iam:DeleteRole",
                "iam:DeleteRolePolicy",
                "iam:DeleteServiceLinkedRole",
                "iam:DetachRolePolicy",
                "iam:GetInstanceProfile",
                "iam:GetOpenIDConnectProvider",
                "iam:GetPolicy",
                "iam:GetPolicyVersion",
                "iam:GetRole",
                "iam:GetRolePolicy",
                "iam:List*",
                "iam:PassRole",
                "iam:PutRolePolicy",
                "iam:RemoveRoleFromInstanceProfile",
                "iam:TagRole",
                "iam:UntagRole",
                "iam:UpdateAssumeRolePolicy",
                // The permissions below are needed for logging.
                "logs:CreateLogGroup",
                "logs:DescribeLogGroups",
                "logs:DeleteLogGroup",
                "logs:ListTagsLogGroup",
                "logs:PutRetentionPolicy",
                // The permissions below are needed for secrets encryption in the EKS cluster.
                "kms:CreateGrant",
                "kms:CreateKey",
                "kms:DescribeKey",
                "kms:GetKeyPolicy",
                "kms:GetKeyRotationStatus",
                "kms:ListResourceTags",
                "kms:ScheduleKeyDeletion"
            ],
            "Resource": "*"
        }
    ]
}
```
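
Before running the Terraform scripts, it can be worth confirming that your local credentials resolve to the identity carrying this policy. A minimal sketch with standard AWS CLI calls; the role name is a placeholder for whatever role you attached the policy to:

```shell
# Show the account and IAM identity the current credentials resolve to.
aws sts get-caller-identity

# List the managed policies attached to the role (example role name).
aws iam list-attached-role-policies --role-name eks-terraform-provisioner
```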
18 changes: 18 additions & 0 deletions aws/eks/eks-terraform-scripts/kubernetes-admin.rbac.yaml
@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kube-system
19 changes: 19 additions & 0 deletions aws/eks/eks-terraform-scripts/kubernetes.tf
@@ -0,0 +1,19 @@
# The Kubernetes provider is included in this file so the EKS module can complete successfully.
# Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
# You should **not** schedule deployments and services in this workspace.
# Keeping workspaces modular (one for provisioning EKS, another for scheduling Kubernetes resources) follows best practices.

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  exec {
    api_version = "client.authentication.k8s.io/v1alpha1"
    command     = "aws"
    args = [
      "eks",
      "get-token",
      "--cluster-name",
      data.aws_eks_cluster.cluster.name
    ]
  }
}
34 changes: 34 additions & 0 deletions aws/eks/eks-terraform-scripts/outputs.tf
@@ -0,0 +1,34 @@
output "cluster_id" {
  description = "EKS cluster ID."
  value       = module.eks.cluster_id
}

output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group IDs attached to the cluster control plane."
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" {
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
}

output "config_map_aws_auth" {
  description = "A Kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "region" {
  description = "AWS region."
  value       = var.region
}

output "cluster_name" {
  description = "Kubernetes cluster name."
  value       = local.cluster_name
}
46 changes: 46 additions & 0 deletions aws/eks/eks-terraform-scripts/security-groups.tf
@@ -0,0 +1,46 @@
# Additional security groups for the worker groups.
# Each allows inbound SSH (port 22) from a different set of private (RFC 1918) address ranges.

resource "aws_security_group" "worker_group_one" {
  name_prefix = "worker_group_one"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
    ]
  }
}

resource "aws_security_group" "worker_group_two" {
  name_prefix = "worker_group_two"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "192.168.0.0/16",
    ]
  }
}

resource "aws_security_group" "all_workers" {
  name_prefix = "all_workers"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }
}