Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Some other fixes from using commit0 for a real project #94

Merged
merged 2 commits into from
Jan 16, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion templates/commit0/commit0.tmpl
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ infrastructure:
accountId: {{ .Infrastructure.AWS.AccountID }}
region: {{ .Infrastructure.AWS.Region }}
eks:
clusterName: staging
clusterName: {{.ProjectName}}-staging-{{ .Infrastructure.AWS.Region }}
cognito:
enabled: true
s3Hosting:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,6 @@ data "aws_iam_policy" "CloudWatchAgentServerPolicy" {
}

resource "aws_iam_role_policy_attachment" "k8s_monitoring_role_policy" {
role = "${aws_iam_role.k8s_monitoring.id}"
policy_arn = "${data.aws_iam_policy.CloudWatchAgentServerPolicy.arn}"
role = aws_iam_role.k8s_monitoring.id
policy_arn = data.aws_iam_policy.CloudWatchAgentServerPolicy.arn
}
14 changes: 9 additions & 5 deletions templates/kubernetes/terraform/modules/kubernetes/provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,16 @@ data "aws_eks_cluster" "cluster" {
}

data "aws_eks_cluster_auth" "cluster_auth" {
name = "${data.aws_eks_cluster.cluster.name}"
name = data.aws_eks_cluster.cluster.name
}

provider "aws" {
region = var.region
}

provider "kubernetes" {
host = "${data.aws_eks_cluster.cluster.endpoint}"
cluster_ca_certificate = "${base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)}"
token = "${data.aws_eks_cluster_auth.cluster_auth.token}"
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster_auth.token
load_config_file = false
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,6 @@ variable "external_dns_owner_id" {
}

variable "external_dns_assume_roles" {
type = "list"
description = "List of roles that should be able to assume the external dns role (most likely the role of the cluster worker nodes)"
type = list(string)
}
6 changes: 6 additions & 0 deletions templates/terraform/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -93,3 +93,9 @@
environment/development$ terraform init
environment/development$ terraform plan
```

## To use kubectl with the created EKS cluster:

Exchange your AWS credentials for Kubernetes credentials.
This adds a new context to your kubeconfig:
`aws eks update-kubeconfig --name <cluster name> --region <aws region>`
7 changes: 7 additions & 0 deletions templates/terraform/modules/s3_hosting/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,15 @@ resource "aws_s3_bucket_policy" "client_assets" {
policy = data.aws_iam_policy_document.assets_origin[each.value].json
}

# To use an ACM certificate with CloudFront, it must exist in us-east-1
provider "aws" {
region = "us-east-1"
alias = "east1"
}

# Look up an existing ACM certificate for this domain
data "aws_acm_certificate" "wildcard_cert" {
provider = "aws.east1"
domain = var.cert_domain
most_recent = "true"
}
Expand Down
3 changes: 3 additions & 0 deletions templates/terraform/modules/vpc/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -31,4 +31,7 @@ module "vpc" {
environment = var.environment
}

vpc_tags = {
"kubernetes.io/cluster/${var.kubernetes_cluster_name}" = "shared"
}
}