
Merge pull request #367 from kinvolk/johananl/fix-aws
iaguis authored May 12, 2020
2 parents 07b0bdd + 8d44c85 commit 4d3d432
Showing 22 changed files with 308 additions and 217 deletions.
9 changes: 7 additions & 2 deletions assets/components/contour/templates/02-service-envoy.yaml
@@ -19,12 +19,17 @@ spec:
externalTrafficPolicy: Local
ports:
- port: 80
{{- if eq .Values.envoy.serviceType "NodePort" }}
nodePort: 30080
{{- end }}
name: http
protocol: TCP
- port: 443
{{- if eq .Values.envoy.serviceType "NodePort" }}
nodePort: 30443
{{- end }}
name: https
protocol: TCP
selector:
app: envoy
type: LoadBalancer

type: {{ .Values.envoy.serviceType }}
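The Service type is now driven by the chart value: when envoy.serviceType is "NodePort", the template pins the HTTP and HTTPS node ports to 30080 and 30443 so load balancer target groups can point at fixed ports on every worker. A minimal sketch of selecting this from a lokocfg, assuming the component's service_type option maps straight to .Values.envoy.serviceType (as the CI config at the end of this diff suggests):

component "contour" {
  # "NodePort" makes the template emit nodePort 30080/30443 as above;
  # any other value is rendered verbatim as the Service type.
  service_type = "NodePort"
}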
2 changes: 0 additions & 2 deletions assets/components/contour/templates/03-envoy.yaml
@@ -101,11 +101,9 @@ spec:
fieldPath: metadata.name
ports:
- containerPort: 80
hostPort: 80
name: http
protocol: TCP
- containerPort: 443
hostPort: 443
name: https
protocol: TCP
readinessProbe:
1 change: 1 addition & 0 deletions assets/components/contour/values.yaml
@@ -10,6 +10,7 @@ contour:
envoy:
image: docker.io/envoyproxy/envoy
tag: v1.13.1
serviceType:

nodeAffinity: {}
# nodeAffinity:
29 changes: 4 additions & 25 deletions assets/lokomotive-kubernetes/aws/flatcar-linux/kubernetes/nlb.tf
@@ -36,33 +36,8 @@ resource "aws_lb_listener" "apiserver-https" {
}
}

# Forward HTTP ingress traffic to workers
resource "aws_lb_listener" "ingress-http" {
load_balancer_arn = aws_lb.nlb.arn
protocol = "TCP"
port = 80

default_action {
type = "forward"
target_group_arn = module.workers.target_group_http
}
}

# Forward HTTPS ingress traffic to workers
resource "aws_lb_listener" "ingress-https" {
load_balancer_arn = aws_lb.nlb.arn
protocol = "TCP"
port = 443

default_action {
type = "forward"
target_group_arn = module.workers.target_group_https
}
}

# Target group of controllers
resource "aws_lb_target_group" "controllers" {
name = "${var.cluster_name}-controllers"
vpc_id = aws_vpc.network.id
target_type = "instance"

@@ -81,6 +56,10 @@ resource "aws_lb_target_group" "controllers" {
# Interval between health checks required to be 10 or 30
interval = 10
}

tags = {
ClusterName = var.cluster_name
}
}

# Attach controller instances to apiserver NLB
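With the ingress listeners gone from this file, the cluster module keeps only the apiserver listener; HTTP/HTTPS listeners are created per worker pool (see the workers ingress file later in this diff). A sketch of the wiring, assuming the cluster module hands the NLB ARN to the workers module (module layout is illustrative):

module "workers" {
  source = "./workers"   # hypothetical path

  # The workers module builds its own ingress-http/ingress-https
  # listeners and target groups against this load balancer.
  lb_arn = aws_lb.nlb.arn
}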
(changes to an additional file; path not shown)
@@ -37,19 +37,9 @@ output "kubeconfig" {

# Outputs for custom load balancing

output "nlb_id" {
output "nlb_arn" {
description = "ARN of the Network Load Balancer"
value = aws_lb.nlb.id
}

output "worker_target_group_http" {
description = "ARN of a target group of workers for HTTP traffic"
value = module.workers.target_group_http
}

output "worker_target_group_https" {
description = "ARN of a target group of workers for HTTPS traffic"
value = module.workers.target_group_https
value = aws_lb.nlb.arn
}

# values.yaml content for all deployed charts.
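Consumers that previously attached workers to the exported target groups now consume only the NLB ARN and let each pool create its own listeners and target groups. A hedged sketch of an extra pool wired to the renamed output (instance names are illustrative):

module "extra_worker_pool" {
  source = "./workers"               # hypothetical path

  lb_arn = module.cluster.nlb_arn    # assumes the cluster module is named "cluster"
}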
(changes to an additional file; path not shown)
@@ -196,8 +196,8 @@ resource "aws_security_group_rule" "worker-http" {

type = "ingress"
protocol = "tcp"
from_port = 80
to_port = 80
from_port = 30080
to_port = 30080
cidr_blocks = ["0.0.0.0/0"]
}

@@ -206,8 +206,8 @@ resource "aws_security_group_rule" "worker-https" {

type = "ingress"
protocol = "tcp"
from_port = 443
to_port = 443
from_port = 30443
to_port = 30443
cidr_blocks = ["0.0.0.0/0"]
}

@@ -233,16 +233,6 @@ resource "aws_security_group_rule" "worker-kube-proxy" {
self = true
}

resource "aws_security_group_rule" "ingress-health" {
security_group_id = aws_security_group.worker.id

type = "ingress"
protocol = "tcp"
from_port = 10254
to_port = 10254
cidr_blocks = ["0.0.0.0/0"]
}

# Allow apiserver to access kubelets for exec, log, port-forward
resource "aws_security_group_rule" "worker-kubelet" {
security_group_id = aws_security_group.worker.id
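The worker security group now admits the NodePorts (30080/30443) instead of the host ports, and the separate 10254 health check rule is dropped because the target groups (changed later in this diff) health-check the NodePorts directly. The resulting traffic path, sketched as comments:

# client :80  -> NLB listener :80  -> target group :30080 -> NodePort 30080 -> Envoy :80
# client :443 -> NLB listener :443 -> target group :30443 -> NodePort 30443 -> Envoy :443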
(changes to an additional file; path not shown)
@@ -77,7 +77,6 @@ resource "null_resource" "copy-controller-secrets" {
resource "null_resource" "bootkube-start" {
depends_on = [
module.bootkube,
module.workers,
aws_route53_record.apiserver,
null_resource.copy-controller-secrets,
]
(changes to an additional file; path not shown)
@@ -29,12 +29,6 @@ variable "controller_count" {
description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
type = number
default = 1
description = "Number of workers"
}

variable "controller_type" {
type = string
# When doing the upgrades of controlplane on t3.small instance type when
@@ -47,12 +41,6 @@ variable "controller_type" {
description = "EC2 instance type for controllers"
}

variable "worker_type" {
type = string
default = "t3.small"
description = "EC2 instance type for workers"
}

variable "os_name" {
type = string
default = "flatcar"
@@ -89,30 +77,12 @@ variable "disk_iops" {
description = "IOPS of the EBS volume (e.g. 100)"
}

variable "worker_price" {
type = string
default = ""
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
}

variable "worker_target_groups" {
type = list(string)
description = "Additional target group ARNs to which worker instances should be added"
default = []
}

variable "controller_clc_snippets" {
type = list(string)
description = "Controller Container Linux Config snippets"
default = []
}

variable "worker_clc_snippets" {
type = list(string)
description = "Worker Container Linux Config snippets"
default = []
}

variable "tags" {
type = map
default = {
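The worker_* variables leave the cluster module because workers are now configured per pool rather than per cluster. A sketch of the per-pool equivalents in a lokocfg, mirroring the CI config at the end of this diff (attribute names are assumptions based on the module variables, not confirmed options):

worker_pool "pool-1" {
  count         = 2
  instance_type = "t3.small"   # stands in for the removed worker_type
  spot_price    = ""           # stands in for worker_price; empty keeps on-demand
  clc_snippets  = []           # stands in for worker_clc_snippets
}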

This file was deleted.

(changes to an additional file; path not shown)
@@ -1,18 +1,35 @@
# Target groups of instances for use with load balancers
resource "aws_lb_listener" "ingress-http" {
load_balancer_arn = var.lb_arn
protocol = "TCP"
port = var.lb_http_port

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.workers-http.arn
}
}

resource "aws_lb_listener" "ingress-https" {
load_balancer_arn = var.lb_arn
protocol = "TCP"
port = var.lb_https_port

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.workers-https.arn
}
}

resource "aws_lb_target_group" "workers-http" {
name = "${var.name}-workers-http"
vpc_id = var.vpc_id
target_type = "instance"

protocol = "TCP"
port = 80
port = 30080

# HTTP health check for ingress
health_check {
protocol = "HTTP"
port = 10254
path = "/healthz"
protocol = "TCP"
port = 30080

# NLBs required to use same healthy and unhealthy thresholds
healthy_threshold = 3
@@ -21,21 +38,23 @@ resource "aws_lb_target_group" "workers-http" {
# Interval between health checks required to be 10 or 30
interval = 10
}

tags = {
ClusterName = var.cluster_name
PoolName = var.pool_name
}
}

resource "aws_lb_target_group" "workers-https" {
name = "${var.name}-workers-https"
vpc_id = var.vpc_id
target_type = "instance"

protocol = "TCP"
port = 443
port = 30443

# HTTP health check for ingress
health_check {
protocol = "HTTP"
port = 10254
path = "/healthz"
protocol = "TCP"
port = 30443

# NLBs required to use same healthy and unhealthy thresholds
healthy_threshold = 3
@@ -44,4 +63,9 @@ resource "aws_lb_target_group" "workers-https" {
# Interval between health checks required to be 10 or 30
interval = 10
}

tags = {
ClusterName = var.cluster_name
PoolName = var.pool_name
}
}
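Each pool's listeners default to ports 80 and 443 on the shared NLB, and an NLB allows only one listener per port, so presumably the lb_http_port/lb_https_port variables (declared in the next file) exist to let an additional pool on the same load balancer listen elsewhere. A sketch with illustrative values:

module "worker_pool_b" {
  source = "./workers"   # hypothetical path

  lb_arn        = aws_lb.nlb.arn
  lb_http_port  = 8080   # avoid clashing with the pool already on :80
  lb_https_port = 8443
}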
(changes to an additional file; path not shown)
@@ -1,4 +1,9 @@
variable "name" {
variable "cluster_name" {
type = string
description = "Cluster name (prepended to pool name)"
}

variable "pool_name" {
type = string
description = "Unique name for the worker pool"
}
@@ -125,3 +130,19 @@ variable "cluster_domain_suffix" {
type = string
default = "cluster.local"
}

variable "lb_arn" {
description = "ARN of the load balancer on which to create listeners for this worker pool"
}

variable "lb_http_port" {
description = "Port the load balancer should listen on for HTTP connections"
type = number
default = 80
}

variable "lb_https_port" {
description = "Port the load balancer should listen on for HTTPS connections"
type = number
default = 443
}
(changes to an additional file; path not shown)
@@ -1,6 +1,6 @@
# Workers AutoScaling Group
resource "aws_autoscaling_group" "workers" {
name = "${var.name}-worker"
name = "${var.cluster_name}-${var.pool_name}-workers"

# count
desired_capacity = var.worker_count
@@ -37,7 +37,7 @@ resource "aws_autoscaling_group" "workers" {
[
{
key = "Name"
value = "${var.name}-worker"
value = "${var.cluster_name}-${var.pool_name}-worker"
propagate_at_launch = true
},
],
@@ -54,6 +54,7 @@ resource "aws_autoscaling_group" "workers" {

# Worker template
resource "aws_launch_configuration" "worker" {
name_prefix = "${var.cluster_name}-${var.pool_name}-"
image_id = local.ami_id
instance_type = var.instance_type
spot_price = var.spot_price
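The pool-aware names keep resources unique when several pools share a cluster, which is presumably why the CI config below drops the $CLUSTER_ID prefix from the pool name. A worked example with hypothetical values cluster_name = "demo" and pool_name = "wp":

# aws_autoscaling_group.workers.name   -> "demo-wp-workers"
# "Name" tag propagated to instances   -> "demo-wp-worker"
# launch configuration name_prefix     -> "demo-wp-" (Terraform appends a unique suffix)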
3 changes: 2 additions & 1 deletion ci/aws/aws-cluster.lokocfg.envsubst
@@ -16,7 +16,7 @@ cluster "aws" {
os_channel = var.os_channel
ssh_pubkeys = ["$PUB_KEY"]

worker_pool "$CLUSTER_ID-wp" {
worker_pool "wp" {
count = 2
ssh_pubkeys = ["$PUB_KEY"]
disk_size = 30
@@ -49,6 +49,7 @@ component "prometheus-operator" {
component "contour" {
ingress_hosts = ["dex.$CLUSTER_ID.$AWS_DNS_ZONE", "gangway.$CLUSTER_ID.$AWS_DNS_ZONE"]
enable_monitoring = true
service_type = "NodePort"
}

component "metrics-server" {}