Some other fixes from using commit0 for a real project
bmonkman committed Jan 16, 2020
1 parent 50d7436 commit 3b75283
Showing 7 changed files with 29 additions and 9 deletions.
2 changes: 1 addition & 1 deletion templates/commit0/commit0.tmpl
@@ -12,7 +12,7 @@ infrastructure:
accountId: {{ .Infrastructure.AWS.AccountID }}
region: {{ .Infrastructure.AWS.Region }}
eks:
-clusterName: staging
+clusterName: {{.ProjectName}}-staging-{{ .Infrastructure.AWS.Region }}
cognito:
enabled: true
s3Hosting:
@@ -19,6 +19,6 @@ data "aws_iam_policy" "CloudWatchAgentServerPolicy" {
}

resource "aws_iam_role_policy_attachment" "k8s_monitoring_role_policy" {
role = "${aws_iam_role.k8s_monitoring.id}"
policy_arn = "${data.aws_iam_policy.CloudWatchAgentServerPolicy.arn}"
role = aws_iam_role.k8s_monitoring.id
policy_arn = data.aws_iam_policy.CloudWatchAgentServerPolicy.arn
}
14 changes: 9 additions & 5 deletions templates/kubernetes/terraform/modules/kubernetes/provider.tf
@@ -5,12 +5,16 @@ data "aws_eks_cluster" "cluster" {
}

data "aws_eks_cluster_auth" "cluster_auth" {
name = "${data.aws_eks_cluster.cluster.name}"
name = data.aws_eks_cluster.cluster.name
}

provider "aws" {
region = var.region
}

provider "kubernetes" {
host = "${data.aws_eks_cluster.cluster.endpoint}"
cluster_ca_certificate = "${base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)}"
token = "${data.aws_eks_cluster_auth.cluster_auth.token}"
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster_auth.token
load_config_file = false
}
}
@@ -23,6 +23,6 @@ variable "external_dns_owner_id" {
}

variable "external_dns_assume_roles" {
type = "list"
description = "List of roles that should be able to assume the external dns role (most likely the role of the cluster worker nodes)"
type = list(string)
}
6 changes: 6 additions & 0 deletions templates/terraform/README.md
@@ -93,3 +93,9 @@
environment/development$ terraform init
environment/development$ terraform plan
```

+## To use kubectl with the created EKS cluster:
+
+Exchange your AWS credentials for Kubernetes credentials.
+This will add a new context to your kubeconfig.
+`aws eks update-kubeconfig --name <cluster name> --region <aws region>`
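For example, with the staging cluster name templated in commit0.tmpl above, this might look like the following (the project name and region here are illustrative):

```
# Exchange AWS credentials for a kubeconfig entry (cluster name and region are examples)
aws eks update-kubeconfig --name myproject-staging-us-east-1 --region us-east-1

# Confirm the new context works and the worker nodes are reachable
kubectl config current-context
kubectl get nodes
```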
7 changes: 7 additions & 0 deletions templates/terraform/modules/s3_hosting/main.tf
@@ -67,8 +67,15 @@ resource "aws_s3_bucket_policy" "client_assets" {
policy = data.aws_iam_policy_document.assets_origin[each.value].json
}

+# To use an ACM cert with CF it has to exist in us-east-1
+provider "aws" {
+region = "us-east-1"
+alias = "east1"
+}
+
# Find an already created ACM cert for this domain
data "aws_acm_certificate" "wildcard_cert" {
provider = "aws.east1"
domain = var.cert_domain
most_recent = "true"
}
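The aliased us-east-1 provider exists only because CloudFront will not accept an ACM certificate from any other region. For context (not part of this commit), here is a minimal sketch of how such a certificate is typically wired into a CloudFront distribution; the bucket reference and resource names are assumptions for illustration:

```
resource "aws_cloudfront_distribution" "client_assets" {
  enabled             = true
  default_root_object = "index.html"

  origin {
    # Assumes an aws_s3_bucket.client_assets resource exists elsewhere in the module
    domain_name = aws_s3_bucket.client_assets.bucket_regional_domain_name
    origin_id   = "client-assets"
  }

  default_cache_behavior {
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "client-assets"
    viewer_protocol_policy = "redirect-to-https"

    forwarded_values {
      query_string = false
      cookies {
        forward = "none"
      }
    }
  }

  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  viewer_certificate {
    # The distribution itself is global; only the certificate lookup needs the east1 alias
    acm_certificate_arn = data.aws_acm_certificate.wildcard_cert.arn
    ssl_support_method  = "sni-only"
  }
}
```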
3 changes: 3 additions & 0 deletions templates/terraform/modules/vpc/main.tf
@@ -31,4 +31,7 @@ module "vpc" {
environment = var.environment
}

+vpc_tags = {
+"kubernetes.io/cluster/${var.kubernetes_cluster_name}" = "shared"
+}
}
