Skip to content

Commit

Permalink
feat: Blueprint to demonstrate installation of multiple nginx ingress…
Browse files Browse the repository at this point in the history
… controllers for internal and public traffic split (#1734)

Co-authored-by: Apoorva Kulkarni <kuapoorv@amazon.com>
  • Loading branch information
RobertNorthard and askulkarni2 authored Aug 17, 2023
1 parent 03db0c8 commit fa5d17d
Show file tree
Hide file tree
Showing 6 changed files with 347 additions and 0 deletions.
7 changes: 7 additions & 0 deletions docs/blueprints/private-public-ingress.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
title: Private and Public Ingress
---

{%
include-markdown "../../examples/private-public-ingress/README.md"
%}
40 changes: 40 additions & 0 deletions examples/private-public-ingress/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Amazon EKS Private and Public Ingress example

This example demonstrates how to provision an Amazon EKS cluster with two ingress-nginx controllers; one to expose applications publicly and the other to expose applications internally. It also assigns security groups to the Network Load Balancers used to expose the internal and external ingress controllers.

This solution:
* Deploys Amazon EKS, with 1 Managed Node Group using the Bottlerocket Amazon EKS Optimized AMI spread across 3 availability zones.
* Installs the AWS Load Balancer controller for creating Network Load Balancers and Application Load Balancers. This is the recommended approach instead of the in-tree AWS cloud provider load balancer controller.
* Installs an ingress-nginx controller for public traffic
* Installs an ingress-nginx controller for internal traffic

To expose your application services via an `Ingress` resource with this solution you can set the respective `ingressClassName` as either `ingress-nginx-external` or `ingress-nginx-internal`.

Refer to the [documentation](https://kubernetes-sigs.github.io/aws-load-balancer-controller) for `AWS Load Balancer controller` configuration options.

## Prerequisites:

Ensure that you have the following tools installed locally:

1. [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
2. [kubectl](https://kubernetes.io/docs/tasks/tools/)
3. [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli)

## Deploy

To provision this example:

```sh
terraform init
terraform apply
```

Enter `yes` at the command prompt to apply.

## Destroy

To teardown and remove the resources created in this example:

```sh
terraform destroy -auto-approve
```
274 changes: 274 additions & 0 deletions examples/private-public-ingress/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,274 @@
provider "aws" {
  # All resources in this example are created in the region declared in locals.
  region = local.region
}

# The kubernetes, helm and kubectl providers all authenticate against the EKS
# cluster created below, using a short-lived token from the EKS API.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  token                  = data.aws_eks_cluster_auth.this.token
}

provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
    token                  = data.aws_eks_cluster_auth.this.token
  }
}

provider "kubectl" {
  apply_retry_count      = 10
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  load_config_file       = false # do not read ~/.kube/config; use the connection settings above
  token                  = data.aws_eks_cluster_auth.this.token
}

# Short-lived authentication token for the cluster, consumed by the
# kubernetes/helm/kubectl providers above.
data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_name
}

# Availability zones of the selected region; the first three are used (see locals).
data "aws_availability_zones" "available" {}

locals {
  region = "eu-west-1"
  name   = "eks-private-public-ingress" # base name for the cluster, VPC and related resources

  vpc_cidr = "10.0.0.0/16"
  # Use the first three availability zones of the region.
  azs = slice(data.aws_availability_zones.available.names, 0, 3)

  # Common tags applied to taggable resources.
  tags = {
    Blueprint  = local.name
    GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
  }
}

# Security group attached to the internet-facing NLB that fronts the external
# ingress-nginx controller. Tighten the CIDR ranges before production use.
resource "aws_security_group" "ingress_nginx_external_sg" {
  name        = "ingress-nginx-external-sg"
  description = "Allow public HTTP and HTTPS traffic"
  vpc_id      = module.vpc.vpc_id

  ingress {
    description = "HTTP from anywhere"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"] # modify to your requirements
  }

  ingress {
    description = "HTTPS from anywhere"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"] # modify to your requirements
  }

  egress {
    description = "Allow all outbound traffic"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = local.tags # consistent tagging with the other resources in this example
}

/*
Deploy the ingress-nginx controller for public traffic, exposed by an
internet-facing Network Load Balancer guarded by the external security group.
*/
module "eks_blueprints_kubernetes_addons_nginx_external" {
  source = "aws-ia/eks-blueprints-addons/aws"
  # Pinned to the same release series as the internal controller module below;
  # "~> 1.0" allowed the two installs to drift apart, and the SG-for-NLB
  # annotations used here need a recent addons release.
  version = "~> 1.6.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  enable_ingress_nginx = true

  ingress_nginx = {
    name = "ingress-nginx-external"
    values = [
      <<-EOT
        controller:
          replicaCount: 3
          service:
            annotations:
              service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
              service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
              service.beta.kubernetes.io/aws-load-balancer-security-groups: ${aws_security_group.ingress_nginx_external_sg.id}
              # Annotation values must be strings; an unquoted `true` is parsed
              # as a YAML boolean and rejected by the Kubernetes API.
              service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules: "true"
            loadBalancerClass: service.k8s.aws/nlb
          # Spread the controller replicas across zones and across nodes.
          topologySpreadConstraints:
            - maxSkew: 1
              topologyKey: topology.kubernetes.io/zone
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  app.kubernetes.io/instance: ingress-nginx-external
            - maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  app.kubernetes.io/instance: ingress-nginx-external
          minAvailable: 2 # PodDisruptionBudget: keep at least 2 of 3 replicas available
          ingressClassResource:
            name: ingress-nginx-external
            default: false
      EOT
    ]
  }

  tags = local.tags # consistent tagging with the other addon modules
}

# Security group attached to the internal NLB that fronts the internal
# ingress-nginx controller; only traffic from inside the VPC is allowed in.
resource "aws_security_group" "ingress_nginx_internal_sg" {
  name        = "ingress-nginx-internal-sg"
  description = "Allow local HTTP and HTTPS traffic"
  vpc_id      = module.vpc.vpc_id

  ingress {
    description = "HTTP from within the VPC"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = [local.vpc_cidr] # modify to your requirements
  }

  ingress {
    description = "HTTPS from within the VPC"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = [local.vpc_cidr] # modify to your requirements
  }

  egress {
    description = "Allow all outbound traffic"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = local.tags # consistent tagging with the other resources in this example
}

/*
Deploy the ingress-nginx controller for internal traffic, exposed by an
internal Network Load Balancer guarded by the internal security group.
*/
module "eks_blueprints_kubernetes_addons_nginx_internal" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.6.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  enable_ingress_nginx = true

  ingress_nginx = {
    name = "ingress-nginx-internal"
    values = [
      <<-EOT
        controller:
          replicaCount: 3
          service:
            annotations:
              service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
              service.beta.kubernetes.io/aws-load-balancer-scheme: internal
              service.beta.kubernetes.io/aws-load-balancer-security-groups: ${aws_security_group.ingress_nginx_internal_sg.id}
              # Annotation values must be strings; an unquoted `true` is parsed
              # as a YAML boolean and rejected by the Kubernetes API.
              service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules: "true"
            loadBalancerClass: service.k8s.aws/nlb
          # Spread the controller replicas across zones and across nodes.
          topologySpreadConstraints:
            - maxSkew: 1
              topologyKey: topology.kubernetes.io/zone
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  app.kubernetes.io/instance: ingress-nginx-internal
            - maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  app.kubernetes.io/instance: ingress-nginx-internal
          minAvailable: 2 # PodDisruptionBudget: keep at least 2 of 3 replicas available
          ingressClassResource:
            name: ingress-nginx-internal
            default: false
      EOT
    ]
  }

  tags = local.tags # consistent tagging with the other addon modules
}

# Cluster-wide add-ons: the AWS Load Balancer Controller provisions the
# Network Load Balancers requested by both ingress-nginx Services.
module "eks_blueprints_kubernetes_addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  enable_aws_load_balancer_controller = true
  aws_load_balancer_controller = {
    chart_version = "1.6.0" # min version required to use SG for NLB feature
  }

  tags = local.tags

  # NOTE(review): likely redundant — this module already depends on module.eks
  # through the attribute references above; confirm before removing.
  depends_on = [module.eks]
}

# EKS cluster with a single Bottlerocket managed node group.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.15.3"

  cluster_name                   = local.name
  cluster_version                = "1.27"
  cluster_endpoint_public_access = true # API server endpoint reachable from the internet

  vpc_id = module.vpc.vpc_id
  # Worker nodes are placed only in the private subnets.
  subnet_ids = slice(module.vpc.private_subnets, 0, 3)

  eks_managed_node_groups = {
    core_node_group = {
      instance_types = ["m5.large"]

      # Bottlerocket Amazon EKS optimized AMI.
      ami_type = "BOTTLEROCKET_x86_64"
      platform = "bottlerocket"

      # Fixed-size group of three nodes spread across the private subnets.
      min_size     = 3
      max_size     = 3
      desired_size = 3
    }
  }

  tags = local.tags
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "5.0.0"

  name = local.name
  cidr = local.vpc_cidr

  azs = local.azs
  # One /24 public and one /24 private subnet per AZ, carved from the VPC CIDR.
  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]

  # A single NAT gateway keeps the example cheap; it is not highly available across AZs.
  enable_nat_gateway = true
  single_nat_gateway = true

  manage_default_network_acl    = true
  default_network_acl_tags      = { Name = "${local.name}-default" }
  manage_default_route_table    = true
  default_route_table_tags      = { Name = "${local.name}-default" }
  manage_default_security_group = true
  default_security_group_tags   = { Name = "${local.name}-default" }

  # Subnet tags used by the AWS Load Balancer Controller for subnet discovery:
  # internet-facing load balancers use public subnets, internal ones use private.
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}
4 changes: 4 additions & 0 deletions examples/private-public-ingress/outputs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Convenience command for pointing kubectl at the newly created cluster.
output "configure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
  value       = "aws eks update-kubeconfig --name ${module.eks.cluster_name} --alias ${module.eks.cluster_name} --region ${local.region}"
}
Empty file.
22 changes: 22 additions & 0 deletions examples/private-public-ingress/versions.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Minimum Terraform and provider versions for this example.
terraform {
  required_version = ">= 1.0.0"

  required_providers {
    aws = {
      source = "hashicorp/aws"
      # NOTE(review): bumped from ">= 3.72" — the terraform-aws-modules/vpc
      # v5.x module used by this example requires a v5-series AWS provider.
      version = ">= 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.10"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.14"
    }
  }
}

0 comments on commit fa5d17d

Please sign in to comment.