diff --git a/cmd/create.go b/cmd/create.go
index 03fd951f1..cfac01a87 100644
--- a/cmd/create.go
+++ b/cmd/create.go
@@ -35,62 +35,82 @@ func Create(projectName string, outDir string, t *templator.Templator) string {
         log.Fatalln(aurora.Red(emoji.Sprintf(":exclamation: Error creating root: %v ", err)))
     }
 
-    // @TODO : Clean up the following aws stuff
+    projectConfig := defaultProjConfig(projectName)
+
+    chooseCloudProvider(&projectConfig)
+
+    s := secrets.GetSecrets(rootDir)
+
+    fillProviderDetails(&projectConfig, s)
+
+    var wg sync.WaitGroup
+    util.TemplateFileIfDoesNotExist(rootDir, util.CommitYml, t.Commit0, &wg, projectConfig)
+    util.TemplateFileIfDoesNotExist(rootDir, ".gitignore", t.GitIgnore, &wg, projectName)
+
+    wg.Wait()
+    return rootDir
+}
+
+func chooseCloudProvider(projectConfig *util.ProjectConfiguration) {
     providerPrompt := promptui.Select{
         Label: "Select Cloud Provider",
         Items: []string{"Amazon AWS", "Google GCP", "Microsoft Azure"},
     }
 
-    _, _, err = providerPrompt.Run()
-
-    regionPrompt := promptui.Select{
-        Label: "Select AWS Region ",
-        Items: []string{"us-west-1", "us-west-2", "us-east-1", "us-east-2", "ca-central-1",
-            "eu-central-1", "eu-west-1", "ap-east-1", "ap-south-1"},
-    }
-
-    _, regionResult, err := regionPrompt.Run()
-
+    _, providerResult, err := providerPrompt.Run()
     if err != nil {
         log.Fatalf("Prompt failed %v\n", err)
         panic(err)
     }
 
-    s := secrets.GetSecrets(rootDir)
-
-    sess, err := session.NewSession(&aws.Config{
-        Region:      aws.String(regionResult),
-        Credentials: credentials.NewStaticCredentials(s.AWS.AccessKeyID, s.AWS.SecretAccessKey, ""),
-    })
+    if providerResult == "Amazon AWS" {
+        // @TODO : Move this stuff from util into another package
+        projectConfig.Infrastructure.AWS = &util.AWS{}
+        regionPrompt := promptui.Select{
+            Label: "Select AWS Region ",
+            Items: []string{"us-west-1", "us-west-2", "us-east-1", "us-east-2", "ca-central-1",
+                "eu-central-1", "eu-west-1", "ap-east-1", "ap-south-1"},
+        }
 
-    svc := sts.New(sess)
-    input := &sts.GetCallerIdentityInput{}
+        _, regionResult, err := regionPrompt.Run()
 
-    awsCaller, err := svc.GetCallerIdentity(input)
-    if err != nil {
-        if aerr, ok := err.(awserr.Error); ok {
-            switch aerr.Code() {
-            default:
-                log.Fatalf(aerr.Error())
-            }
-        } else {
-            log.Fatalf(err.Error())
+        if err != nil {
+            log.Fatalf("Prompt failed %v\n", err)
+            panic(err)
         }
-    }
 
-    defaultProjConfig := defaultProjConfig(projectName)
-
-    defaultProjConfig.Infrastructure.AWS.Region = regionResult
-    if awsCaller != nil && awsCaller.Account != nil {
-        defaultProjConfig.Infrastructure.AWS.AccountID = *awsCaller.Account
+        projectConfig.Infrastructure.AWS.Region = regionResult
+    } else {
+        log.Fatalf("Only the AWS provider is available at this time")
     }
+}
 
-    var wg sync.WaitGroup
-    util.TemplateFileIfDoesNotExist(rootDir, util.CommitYml, t.Commit0, &wg, defaultProjConfig)
-    util.TemplateFileIfDoesNotExist(rootDir, ".gitignore", t.GitIgnore, &wg, projectName)
+func fillProviderDetails(projectConfig *util.ProjectConfiguration, s secrets.Secrets) {
+    if projectConfig.Infrastructure.AWS != nil {
+        sess, err := session.NewSession(&aws.Config{
+            Region:      aws.String(projectConfig.Infrastructure.AWS.Region),
+            Credentials: credentials.NewStaticCredentials(s.AWS.AccessKeyID, s.AWS.SecretAccessKey, ""),
+        })
+
+        svc := sts.New(sess)
+        input := &sts.GetCallerIdentityInput{}
+
+        awsCaller, err := svc.GetCallerIdentity(input)
+        if err != nil {
+            if aerr, ok := err.(awserr.Error); ok {
+                switch aerr.Code() {
+                default:
+                    log.Fatalf(aerr.Error())
+                }
+            } else {
+                log.Fatalf(err.Error())
+            }
+        }
 
-    wg.Wait()
-    return rootDir
+        if awsCaller != nil && awsCaller.Account != nil {
+            projectConfig.Infrastructure.AWS.AccountID = *awsCaller.Account
+        }
+    }
 }
 
 func defaultProjConfig(projectName string) util.ProjectConfiguration {
@@ -109,6 +129,9 @@ func defaultProjConfig(projectName string) util.ProjectConfiguration {
             Language: "go",
             GitRepo:  "github.com/test/repo",
         }},
+        Infrastructure: util.Infrastructure{
+            AWS: nil,
+        },
     }
 }
 
diff --git a/go.mod b/go.mod
index 2073c39c9..8abf4f0ad 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ require (
     github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
     github.com/gobuffalo/logger v1.0.1 // indirect
     github.com/gobuffalo/packr/v2 v2.5.2
+    github.com/google/uuid v1.1.1
     github.com/gorilla/mux v1.7.3
     github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
     github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
diff --git a/go.sum b/go.sum
index 3f777d374..64b7e0de4 100644
--- a/go.sum
+++ b/go.sum
@@ -26,6 +26,8 @@ github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4
 github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
 github.com/gobuffalo/packr/v2 v2.5.2 h1:4EvjeIpQLZuRIljwnidYgbRXbr1yIzVRrESiLjqKj6s=
 github.com/gobuffalo/packr/v2 v2.5.2/go.mod h1:sgEE1xNZ6G0FNN5xn9pevVu4nywaxHvgup67xisti08=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
diff --git a/internal/generate/generate_helper.go b/internal/generate/generate_helper.go
index f1fb24957..bba082b18 100644
--- a/internal/generate/generate_helper.go
+++ b/internal/generate/generate_helper.go
@@ -18,12 +18,12 @@ import (
 func GenerateArtifactsHelper(t *templator.Templator, cfg *config.Commit0Config, pathPrefix string, runInit bool, runApply bool) {
     var wg sync.WaitGroup
 
-    if !util.ValidateLanguage(cfg.Frontend.Framework) {
+    if cfg.Frontend.Framework != "" && !util.ValidateLanguage(cfg.Frontend.Framework) {
         log.Fatalln(aurora.Red(emoji.Sprintf(":exclamation: '%s' is not a supported framework.", cfg.Frontend.Framework)))
     }
 
     for _, s := range cfg.Services {
-        if !util.ValidateLanguage(cfg.Frontend.Framework) {
+        if !util.ValidateLanguage(s.Language) {
             log.Fatalln(aurora.Red(emoji.Sprintf(":exclamation: '%s' in service '%s' is not a supported language.", s.Name, s.Language)))
         }
     }
diff --git a/internal/generate/terraform/generate.go b/internal/generate/terraform/generate.go
index d6066d427..fee8d78af 100644
--- a/internal/generate/terraform/generate.go
+++ b/internal/generate/terraform/generate.go
@@ -17,11 +17,11 @@ import (
 
 // @TODO : These are specific to a k8s version. If we make the version a config option we will need to change this
 var amiLookup = map[string]string{
-    "us-east-1":    "ami-0392bafc801b7520f",
-    "us-east-2":    "ami-082bb518441d3954c",
-    "us-west-2":    "ami-05d586e6f773f6abf",
-    "eu-west-1":    "ami-059c6874350e63ca9",
-    "eu-central-1": "ami-0e21bc066a9dbabfa",
+    "us-east-1":    "ami-07d6c8e62ce328a10",
+    "us-east-2":    "ami-053250833d1030033",
+    "us-west-2":    "ami-07be7092831897fd6",
+    "eu-west-1":    "ami-02dca57ad67c7bf57",
+    "eu-central-1": "ami-03fbd442f4f3aa689",
 }
 
 func Generate(t *templator.Templator, cfg *config.Commit0Config, wg *sync.WaitGroup, pathPrefix string) {
@@ -58,7 +58,6 @@ func GetOutputs(cfg *config.Commit0Config, pathPrefix string, outputs []string)
 
 // Init sets up anything required by Execute
 func Init(cfg *config.Commit0Config, pathPrefix string) {
-    // @TODO : Change this check. Most likely we should discover the accountid
     if cfg.Infrastructure.AWS.AccountId != "" {
         log.Println("Preparing aws environment...")
@@ -69,13 +68,19 @@ func Init(cfg *config.Commit0Config, pathPrefix string) {
         // @TODO : A check here would be nice to see if this stuff exists first, mostly for testing
         log.Println(aurora.Cyan(emoji.Sprintf(":alarm_clock: Initializing remote backend...")))
         util.ExecuteCommand(exec.Command("terraform", "init"), filepath.Join(pathPrefix, "bootstrap/remote-state"), envars)
-        util.ExecuteCommand(exec.Command("terraform", "apply", "-auto-approve"), filepath.Join(pathPrefix, "bootstrap/remote-state"), envars)
+        // @TODO : Properly loop through environments when we support that
+        util.ExecuteCommand(exec.Command("terraform", "apply", "-auto-approve", "-var", "environment=staging", "-state-out=staging.tfstate"), filepath.Join(pathPrefix, "bootstrap/remote-state"), envars)
+        util.ExecuteCommand(exec.Command("terraform", "apply", "-auto-approve", "-var", "environment=production", "-state-out=production.tfstate"), filepath.Join(pathPrefix, "bootstrap/remote-state"), envars)
+
+        log.Println("Creating users...")
+        util.ExecuteCommand(exec.Command("terraform", "init"), filepath.Join(pathPrefix, "bootstrap/create-users"), envars)
+        util.ExecuteCommand(exec.Command("terraform", "apply", "-auto-approve"), filepath.Join(pathPrefix, "bootstrap/create-users"), envars)
+
     }
 }
 
 // Execute terrafrom init & plan. May modify the config passed in
 func Execute(cfg *config.Commit0Config, pathPrefix string) {
-    // @TODO : Change this check. Most likely we should discover the accountid
     if cfg.Infrastructure.AWS.AccountId != "" {
         log.Println("Preparing aws environment...")
diff --git a/internal/util/projectAttributes.go b/internal/util/projectAttributes.go
index 9ccfa7509..61c301c5f 100644
--- a/internal/util/projectAttributes.go
+++ b/internal/util/projectAttributes.go
@@ -1,5 +1,7 @@
 package util
 
+// @TODO : Move this stuff from util into another package
+
 const (
     Go    = "go"
     React = "react"
@@ -43,7 +45,7 @@ type ProjectConfiguration struct {
 }
 
 type Infrastructure struct {
-    AWS AWS `json:"aws"`
+    AWS *AWS `json:"aws"`
 }
 
 type AWS struct {
     AccountID string
diff --git a/internal/util/secrets/secrets.go b/internal/util/secrets/secrets.go
index fa35e318a..399402508 100644
--- a/internal/util/secrets/secrets.go
+++ b/internal/util/secrets/secrets.go
@@ -18,7 +18,9 @@ import (
 
 // Secrets - AWS prompted credentials
 type Secrets struct {
-    AWS AWS
+    AWS         AWS
+    CircleCIKey string
+    GithubToken string
 }
 
 type AWS struct {
@@ -47,21 +49,13 @@ func GetSecrets(baseDir string) Secrets {
         if err != nil {
             log.Fatal(err)
         }
+        credsFile := filepath.Join(usr.HomeDir, ".aws/credentials")
 
-        var awsSecrets Secrets
+        var secrets Secrets
 
         // Load the credentials file to look for profiles
-        credsFile := filepath.Join(usr.HomeDir, ".aws/credentials")
-        creds, err := ioutil.ReadFile(credsFile)
+        profiles, err := GetAWSProfiles()
         if err == nil {
-            // Get all profiles
-            re := regexp.MustCompile(`\[(.*)\]`)
-            profileMatches := re.FindAllStringSubmatch(string(creds), -1)
-            profiles := make([]string, len(profileMatches))
-            for i, p := range profileMatches {
-                profiles[i] = p[1]
-            }
-
             profilePrompt := promptui.Select{
                 Label: "Select AWS Profile",
                 Items: profiles,
@@ -71,7 +65,7 @@ func GetSecrets(baseDir string) Secrets {
             creds, err := credentials.NewSharedCredentials(credsFile, profileResult).Get()
             if err == nil {
-                awsSecrets = Secrets{
+                secrets = Secrets{
                     AWS: AWS{
                         AccessKeyID:     creds.AccessKeyID,
                         SecretAccessKey: creds.SecretAccessKey,
@@ -81,13 +75,51 @@ func GetSecrets(baseDir string) Secrets {
         }
 
         // We couldn't load the credentials file, get the user to just paste them
-        if awsSecrets == (Secrets{}) {
-            awsSecrets = promptCredentials()
+        if secrets.AWS == (AWS{}) {
+            promptAWSCredentials(&secrets)
+        }
+
+        if secrets.CircleCIKey == "" || secrets.GithubToken == "" {
+            ciPrompt := promptui.Select{
+                Label: "Which continuous integration provider do you want to use?",
+                Items: []string{"CircleCI", "GitHub Actions"},
+            }
+
+            _, ciResult, _ := ciPrompt.Run()
+
+            if ciResult == "CircleCI" {
+                promptCircleCICredentials(&secrets)
+            } else if ciResult == "GitHub Actions" {
+                promptGitHubCredentials(&secrets)
+            }
         }
 
-        writeSecrets(secretsFile, awsSecrets)
-        return awsSecrets
+        writeSecrets(secretsFile, secrets)
+        return secrets
+    }
 }
 
+// GetAWSProfiles returns a list of the AWS profiles set up on the user's system
+func GetAWSProfiles() ([]string, error) {
+    usr, err := user.Current()
+    if err != nil {
+        return nil, err
+    }
+
+    // Load the credentials file to look for profiles
+    credsFile := filepath.Join(usr.HomeDir, ".aws/credentials")
+    creds, err := ioutil.ReadFile(credsFile)
+    if err != nil {
+        return nil, err
+    }
+    // Get all profiles
+    re := regexp.MustCompile(`\[(.*)\]`)
+    profileMatches := re.FindAllStringSubmatch(string(creds), -1)
+    profiles := make([]string, len(profileMatches))
+    for i, p := range profileMatches {
+        profiles[i] = p[1]
     }
+    return profiles, nil
 }
 
 func readSecrets(secretsFile string) Secrets {
@@ -122,7 +154,7 @@ func writeSecrets(secretsFile string, s Secrets) {
     }
 }
 
-func promptCredentials() Secrets {
+func promptAWSCredentials(secrets *Secrets) {
 
     validateAKID := func(input string) error {
         // 20 uppercase alphanumeric characters
@@ -167,12 +199,35 @@
         panic(err)
     }
 
-    awsSecrets := Secrets{}
-    awsSecrets.AWS.AccessKeyID = accessKeyIDResult
-    awsSecrets.AWS.SecretAccessKey = secretAccessKeyResult
+    secrets.AWS.AccessKeyID = accessKeyIDResult
+    secrets.AWS.SecretAccessKey = secretAccessKeyResult
+}
 
-    return awsSecrets
+func promptGitHubCredentials(secrets *Secrets) {
+}
 
+func promptCircleCICredentials(secrets *Secrets) {
+    validateKey := func(input string) error {
+        // 40 alphanumeric characters
+        var circleCIKeyPat = regexp.MustCompile(`^[A-Za-z0-9]{40}$`)
+        if !circleCIKeyPat.MatchString(input) {
+            return errors.New("Invalid CircleCI API Key")
+        }
+        return nil
+    }
+
+    prompt := promptui.Prompt{
+        Label:    "Please enter your CircleCI API key (you can create one at https://circleci.com/account/api) ",
+        Validate: validateKey,
+    }
+
+    key, err := prompt.Run()
+
+    if err != nil {
+        log.Fatalf("Prompt failed %v\n", err)
+        panic(err)
+    }
+    secrets.CircleCIKey = key
 }
 
 func fileExists(filename string) bool {
diff --git a/internal/util/util.go b/internal/util/util.go
index 855b5e48a..9c8e5bbce 100644
--- a/internal/util/util.go
+++ b/internal/util/util.go
@@ -12,6 +12,7 @@ import (
     "text/template"
 
     "github.com/kyokomi/emoji"
+    "github.com/google/uuid"
     "github.com/logrusorgru/aurora"
 )
 
@@ -31,6 +32,7 @@ var FuncMap = template.FuncMap{
     "Title":             strings.Title,
     "ToLower":           strings.ToLower,
     "CleanGoIdentifier": CleanGoIdentifier,
+    "GenerateUUID":      uuid.New,
 }
 
 func GetCwd() string {
diff --git a/templates/commit0/commit0.tmpl b/templates/commit0/commit0.tmpl
index 9035c5115..20bd2027b 100644
--- a/templates/commit0/commit0.tmpl
+++ b/templates/commit0/commit0.tmpl
@@ -19,11 +19,12 @@ infrastructure:
     enabled: true
 
 frontend:
-  framework: {{.FrontendFramework}}
+  framework: {{.FrontendFramework }}
+  hostname: {{.FrontendHostname }}
 
 ci:
   system: github
 app:
-  name: {{.ProjectName}}
+  name: {{.ProjectName }}
 services:
 {{range .Services}}
diff --git a/templates/kubernetes/terraform/environments/development/main.tf b/templates/kubernetes/terraform/environments/development/main.tf
index 552440f1b..fbf7e5065 100644
--- a/templates/kubernetes/terraform/environments/development/main.tf
+++ b/templates/kubernetes/terraform/environments/development/main.tf
@@ -1,10 +1,10 @@
 terraform {
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-development-terraform-state"
     key            = "infrastructure/terraform/environments/development/kubernetes"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-development-terraform-state-locks"
   }
 }
 
@@ -20,6 +20,10 @@ module "kubernetes" {
 
   # Assume-role policy used by monitoring fluentd daemonset
   assume_role_policy = data.aws_iam_policy_document.assumerole_root_policy.json
+
+  external_dns_zone         = "{{ .Config.Frontend.Hostname }}"
+  external_dns_owner_id     = "{{ GenerateUUID }}"
+  external_dns_assume_roles = [ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/k8s-{{ .Config.Infrastructure.AWS.EKS.ClusterName }}-workers" ]
 }
 
 # Data sources for EKS IAM
diff --git a/templates/kubernetes/terraform/environments/production/main.tf b/templates/kubernetes/terraform/environments/production/main.tf
index 9a394bf15..d3fc76c1f 100644
--- a/templates/kubernetes/terraform/environments/production/main.tf
+++ b/templates/kubernetes/terraform/environments/production/main.tf
@@ -1,10 +1,10 @@
 terraform {
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-production-terraform-state"
     key            = "infrastructure/terraform/environments/production/kubernetes"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-production-terraform-state-locks"
   }
 }
 
@@ -20,6 +20,10 @@ module "kubernetes" {
 
   # Assume-role policy used by monitoring fluentd daemonset
   assume_role_policy = data.aws_iam_policy_document.assumerole_root_policy.json
+
+  external_dns_zone         = "{{ .Config.Frontend.Hostname }}"
+  external_dns_owner_id     = "{{ GenerateUUID }}"
+  external_dns_assume_roles = [ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/k8s-{{ .Config.Infrastructure.AWS.EKS.ClusterName }}-workers" ]
 }
 
 # Data sources for EKS IAM
diff --git a/templates/kubernetes/terraform/environments/staging/main.tf b/templates/kubernetes/terraform/environments/staging/main.tf
index ab3ce52f7..af6345866 100644
--- a/templates/kubernetes/terraform/environments/staging/main.tf
+++ b/templates/kubernetes/terraform/environments/staging/main.tf
@@ -1,10 +1,10 @@
 terraform {
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-staging-terraform-state"
     key            = "infrastructure/terraform/environments/staging/kubernetes"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-staging-terraform-state-locks"
   }
 }
 
@@ -20,6 +20,10 @@ module "kubernetes" {
 
   # Assume-role policy used by monitoring fluentd daemonset
   assume_role_policy = data.aws_iam_policy_document.assumerole_root_policy.json
+
+  external_dns_zone         = "{{ .Config.Frontend.Hostname }}"
+  external_dns_owner_id     = "{{ GenerateUUID }}"
+  external_dns_assume_roles = [ "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/k8s-{{ .Config.Infrastructure.AWS.EKS.ClusterName }}-workers" ]
 }
 
 # Data sources for EKS IAM
diff --git a/templates/kubernetes/terraform/modules/kubernetes/external_dns.tf b/templates/kubernetes/terraform/modules/kubernetes/external_dns.tf
new file mode 100644
index 000000000..a31405fdc
--- /dev/null
+++ b/templates/kubernetes/terraform/modules/kubernetes/external_dns.tf
@@ -0,0 +1,143 @@
+# Trust relationship
+data "aws_iam_policy_document" "external_dns_trust_relationship" {
+  statement {
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+
+  statement {
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "AWS"
+      identifiers = var.external_dns_assume_roles
+    }
+  }
+}
+
+# external-dns role
+resource "aws_iam_role" "external_dns_role" {
+  name               = "k8s-external-dns-role"
+  assume_role_policy = data.aws_iam_policy_document.external_dns_trust_relationship.json
+}
+
+data "aws_iam_policy_document" "external_dns_policy_doc" {
+  statement {
+    sid    = "k8sExternalDnsRead"
+    effect = "Allow"
+
+    actions = [
+      "route53:ListHostedZones",
+      "route53:ListResourceRecordSets",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "k8sExternalDnsWrite"
+    effect = "Allow"
+
+    actions = ["route53:ChangeResourceRecordSets"]
+
+    resources = ["arn:aws:route53:::hostedzone/*"]
+  }
+}
+
+resource "aws_iam_role_policy" "external_dns_policy" {
+  name   = "k8s-external-dns-policy"
+  role   = aws_iam_role.external_dns_role.id
+  policy = data.aws_iam_policy_document.external_dns_policy_doc.json
+}
+
+resource "kubernetes_service_account" "external_dns" {
+  metadata {
+    name      = "external-dns"
+    namespace = "kube-system"
+  }
+}
+
+resource "kubernetes_cluster_role" "external_dns" {
+  metadata {
+    name = "external-dns"
+  }
+  rule {
+    verbs      = ["get", "list", "watch"]
+    api_groups = [""]
+    resources  = ["pods", "services"]
+  }
+  rule {
+    verbs      = ["get", "list", "watch"]
+    api_groups = ["extensions"]
+    resources  = ["ingresses"]
+  }
+  rule {
+    verbs      = ["list"]
+    api_groups = [""]
+    resources  = ["nodes"]
+  }
+}
+
+resource "kubernetes_cluster_role_binding" "external_dns" {
+  metadata {
+    name = "external-dns"
+  }
+  subject {
+    kind      = "ServiceAccount"
+    name      = "external-dns"
+    namespace = "kube-system"
+  }
+  role_ref {
+    api_group = "rbac.authorization.k8s.io"
+    kind      = "ClusterRole"
+    name      = "external-dns"
+  }
+}
+
+resource "kubernetes_deployment" "external_dns" {
+  metadata {
+    name      = "external-dns"
+    namespace = "kube-system"
+  }
+  spec {
+    replicas = 1
+    selector {
+      match_labels = {
+        "app" = "external-dns",
+      }
+    }
+    template {
+      metadata {
+        labels = {
+          "app" = "external-dns",
+        }
+        annotations = {
+          "iam.amazonaws.com/role" = "k8s-external-dns-role",
+        }
+      }
+      spec {
+        container {
+          name  = "external-dns"
+          image = "registry.opensource.zalan.do/teapot/external-dns:latest"
+          args = [
+            "--source=service",
+            "--source=ingress",
+            "--domain-filter=${var.external_dns_zone}", # Give access only to the specified zone
+            "--provider=aws",
+            "--aws-zone-type=public",
+            "--policy=upsert-only", # Prevent ExternalDNS from deleting any records
+            "--registry=txt",
+            "--txt-owner-id=${var.external_dns_owner_id}", # ID of txt record to manage state
+          ]
+        }
+
+        service_account_name            = "external-dns"
+        automount_service_account_token = true
+      }
+    }
+  }
+}
diff --git a/templates/kubernetes/terraform/modules/kubernetes/kube2iam/main.tf b/templates/kubernetes/terraform/modules/kubernetes/kube2iam/main.tf
index 3d01bd81a..75d16d8d7 100755
--- a/templates/kubernetes/terraform/modules/kubernetes/kube2iam/main.tf
+++ b/templates/kubernetes/terraform/modules/kubernetes/kube2iam/main.tf
@@ -89,7 +89,7 @@ resource "kubernetes_daemonset" "kube2iam" {
         # }
         env {
           name  = "AWS_REGION"
-          value = var.environment
+          value = var.region
         }
         security_context {
           privileged = true
diff --git a/templates/kubernetes/terraform/modules/kubernetes/variables.tf b/templates/kubernetes/terraform/modules/kubernetes/variables.tf
index 050b728b8..e8964d21a 100644
--- a/templates/kubernetes/terraform/modules/kubernetes/variables.tf
+++ b/templates/kubernetes/terraform/modules/kubernetes/variables.tf
@@ -12,4 +12,17 @@ variable "cluster_name" {
 
 variable "assume_role_policy" {
   description = "Assume-role policy for monitoring"
-}
\ No newline at end of file
+}
+
+variable "external_dns_zone" {
+  description = "R53 zone that external-dns will have access to"
+}
+
+variable "external_dns_owner_id" {
+  description = "Unique id of the TXT record that external-dns will use to store state (can just be a uuid)"
+}
+
+variable "external_dns_assume_roles" {
+  type        = list(string)
+  description = "List of roles that should be able to assume the external dns role (most likely the role of the cluster worker nodes)"
+}
diff --git a/templates/terraform/bootstrap/create-users/.gitignore b/templates/terraform/bootstrap/create-users/.gitignore
new file mode 100644
index 000000000..f2eacdf5c
--- /dev/null
+++ b/templates/terraform/bootstrap/create-users/.gitignore
@@ -0,0 +1,2 @@
+# The state will have keys in it which can be ignored
+terraform.tfstate*
diff --git a/templates/terraform/bootstrap/create-users/main.tf b/templates/terraform/bootstrap/create-users/main.tf
new file mode 100644
index 000000000..cd12fa78e
--- /dev/null
+++ b/templates/terraform/bootstrap/create-users/main.tf
@@ -0,0 +1,23 @@
+provider "aws" {
+  region = "{{ .Config.Infrastructure.AWS.Region }}"
+}
+
+# Create the CI User
+resource "aws_iam_user" "ci_user" {
+  name = "ci-user"
+}
+
+# Create a keypair to be used by CI systems
+resource "aws_iam_access_key" "ci_user" {
+  user = aws_iam_user.ci_user.name
+}
+
+# Add the keys to AWS secrets manager
+resource "aws_secretsmanager_secret" "ci_user_keys" {
+  name = "ci-user-keys"
+}
+
+resource "aws_secretsmanager_secret_version" "ci_user_keys" {
+  secret_id     = aws_secretsmanager_secret.ci_user_keys.id
+  secret_string = jsonencode(map("access_key_id", aws_iam_access_key.ci_user.id, "secret_key", aws_iam_access_key.ci_user.secret))
+}
diff --git a/templates/terraform/bootstrap/remote-state/main.tf b/templates/terraform/bootstrap/remote-state/main.tf
index cbf53d65b..5965bbc42 100644
--- a/templates/terraform/bootstrap/remote-state/main.tf
+++ b/templates/terraform/bootstrap/remote-state/main.tf
@@ -3,7 +3,7 @@ provider "aws" {
 }
 
 resource "aws_s3_bucket" "terraform_remote_state" {
-  bucket = "project-{{ .Config.Name }}-terraform-state"
+  bucket = "{{ .Config.Name }}-${var.environment}-terraform-state"
   acl    = "private"
 
   versioning {
@@ -12,8 +12,7 @@ resource "aws_s3_bucket" "terraform_remote_state" {
 }
 
 resource "aws_s3_bucket_public_access_block" "terraform_remote_state" {
-  bucket = "${aws_s3_bucket.terraform_remote_state.id}"
-
+  bucket = aws_s3_bucket.terraform_remote_state.id
 
   block_public_acls   = true
   block_public_policy = true
@@ -22,7 +21,7 @@ resource "aws_s3_bucket_public_access_block" "terraform_remote_state" {
 }
 
 resource "aws_dynamodb_table" "terraform_state_locks" {
-  name           = "{{ .Config.Name }}-terraform-state-locks"
+  name           = "{{ .Config.Name }}-${var.environment}-terraform-state-locks"
   read_capacity  = 2
   write_capacity = 2
   hash_key       = "LockID"
@@ -32,3 +31,7 @@ resource "aws_dynamodb_table" "terraform_state_locks" {
   attribute {
     name = "LockID"
     type = "S"
   }
 }
+
+variable "environment" {
+  description = "The environment (development/staging/production)"
+}
diff --git a/templates/terraform/environments/development/main.tf b/templates/terraform/environments/development/main.tf
index ce8c94f13..04dbfc64c 100644
--- a/templates/terraform/environments/development/main.tf
+++ b/templates/terraform/environments/development/main.tf
@@ -1,11 +1,11 @@
 terraform {
   required_version = ">= 0.12"
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-development-terraform-state"
     key            = "infrastructure/terraform/environments/development/main"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-development-terraform-state-locks"
   }
 }
 
@@ -15,7 +15,7 @@ module "development" {
   environment = "development"
 
   # Project configuration
-  project             = "{{ .Config.Infrastructure.AWS.EKS.ClusterName }}"
+  project             = "{{ .Config.Name }}"
   region              = "{{ .Config.Infrastructure.AWS.Region }}"
   allowed_account_ids = ["{{ .Config.Infrastructure.AWS.AccountId }}"]
 
@@ -25,20 +25,29 @@ module "development" {
 
   # EKS configuration
   eks_worker_instance_type = "t2.small"
-  eks_worker_asg_max_size  = 2
+  eks_worker_asg_min_size  = 1
+  eks_worker_asg_max_size  = 3
 
   # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
   # https://us-east-1.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.14%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=us-east-1
   eks_worker_ami = "{{ .Config.Infrastructure.AWS.EKS.WorkerAMI }}"
   {{- end }}
 
-  # Client configuration
+  {{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
+  # Cognito configuration
   user_pool = "{{ .Config.Name }}-development"
   hostname  = "{{ .Config.Frontend.Hostname }}"
-  s3_hosting_bucket_name = "{{ .Config.Name }}-development"
+  {{- end}}
+
+  # Hosting configuration
+  s3_hosting_buckets = [
+    "{{ .Config.Name }}-development"
+  ]
+  s3_hosting_cert_domain = "{{ .Config.Frontend.Hostname}}"
 }
 
+{{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
 output "cognito_client_id" {
   value = module.staging.cognito.cognito_client_id
 }
@@ -46,3 +55,4 @@ output "cognito_client_id" {
 output "cognito_pool_id" {
   value = module.staging.cognito.cognito_pool_id
 }
+{{- end}}
diff --git a/templates/terraform/environments/production/main.tf b/templates/terraform/environments/production/main.tf
index 87c2c1368..f07dad488 100644
--- a/templates/terraform/environments/production/main.tf
+++ b/templates/terraform/environments/production/main.tf
@@ -1,11 +1,11 @@
 terraform {
   required_version = ">= 0.12"
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-production-terraform-state"
     key            = "infrastructure/terraform/environments/production/main"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-production-terraform-state-locks"
   }
 }
 
@@ -15,7 +15,7 @@ module "production" {
   environment = "production"
 
   # Project configuration
-  project             = "{{ .Config.Infrastructure.AWS.EKS.ClusterName }}"
+  project             = "{{ .Config.Name }}"
   region              = "{{ .Config.Infrastructure.AWS.Region }}"
   allowed_account_ids = ["{{ .Config.Infrastructure.AWS.AccountId }}"]
 
@@ -25,20 +25,29 @@ module "production" {
 
   # EKS configuration
   eks_worker_instance_type = "m4.large"
-  eks_worker_asg_max_size  = 3
+  eks_worker_asg_min_size  = 3
+  eks_worker_asg_max_size  = 6
 
   # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
   # https://us-east-1.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.14%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=us-east-1
   eks_worker_ami = "{{ .Config.Infrastructure.AWS.EKS.WorkerAMI }}"
   {{- end }}
 
-  # Client configuration
+  {{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
+  # Cognito configuration
   user_pool = "{{ .Config.Name }}-production"
   hostname  = "{{ .Config.Frontend.Hostname }}"
-  s3_hosting_bucket_name = "{{ .Config.Name }}-production"
+  {{- end}}
+
+  # Hosting configuration
+  s3_hosting_buckets = [
+    "{{ .Config.Name }}-production"
+  ]
+  s3_hosting_cert_domain = "{{ .Config.Frontend.Hostname}}"
 }
 
+{{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
 output "cognito_client_id" {
   value = module.staging.cognito.cognito_client_id
 }
@@ -46,3 +55,4 @@ output "cognito_client_id" {
 output "cognito_pool_id" {
   value = module.staging.cognito.cognito_pool_id
 }
+{{- end}}
diff --git a/templates/terraform/environments/staging/main.tf b/templates/terraform/environments/staging/main.tf
index ace31c046..6be9dfa99 100644
--- a/templates/terraform/environments/staging/main.tf
+++ b/templates/terraform/environments/staging/main.tf
@@ -1,11 +1,11 @@
 terraform {
   required_version = ">= 0.12"
   backend "s3" {
-    bucket         = "project-{{ .Config.Name }}-terraform-state"
+    bucket         = "{{ .Config.Name }}-staging-terraform-state"
     key            = "infrastructure/terraform/environments/staging/main"
     encrypt        = true
     region         = "{{ .Config.Infrastructure.AWS.Region }}"
-    dynamodb_table = "{{ .Config.Name }}-terraform-state-locks"
+    dynamodb_table = "{{ .Config.Name }}-staging-terraform-state-locks"
   }
 }
 
@@ -15,29 +15,42 @@ module "staging" {
   environment = "staging"
 
   # Project configuration
-  project             = "{{ .Config.Infrastructure.AWS.EKS.ClusterName }}"
+  project             = "{{ .Config.Name }}"
   region              = "{{ .Config.Infrastructure.AWS.Region }}"
   allowed_account_ids = ["{{ .Config.Infrastructure.AWS.AccountId }}"]
 
-  {{- if ne .Config.Infrastructure.AWS.EKS.ClusterName "" }}
+{{- if ne .Config.Infrastructure.AWS.EKS.ClusterName "" }}
   # ECR configuration
-  ecr_repositories = ["{{ .Config.Infrastructure.AWS.EKS.ClusterName }}"]
+  ecr_repositories = [
+  {{- range .Config.Services }}
+    "{{ .Name }}",
+  {{- end }}
+  ]
 
   # EKS configuration
   eks_worker_instance_type = "t2.small"
-  eks_worker_asg_max_size  = 2
+  eks_worker_asg_min_size  = 2
+  eks_worker_asg_max_size  = 6
 
   # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
   # https://us-east-1.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.14%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=us-east-1
   eks_worker_ami = "{{ .Config.Infrastructure.AWS.EKS.WorkerAMI }}"
-  {{- end }}
+{{- end }}
 
-  # Client configuration
+  {{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
+  # Cognito configuration
   user_pool = "{{ .Config.Name }}-staging"
   hostname  = "{{ .Config.Frontend.Hostname }}"
-  s3_hosting_bucket_name = "{{ .Config.Name }}-staging"
+  {{- end }}
+
+  # Hosting configuration
+  s3_hosting_buckets = [
+    "{{ .Config.Name }}-staging"
+  ]
+  s3_hosting_cert_domain = "{{ .Config.Frontend.Hostname}}"
 }
 
+{{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
 output "cognito_client_id" {
   value = module.staging.cognito.cognito_client_id
 }
@@ -45,3 +58,4 @@ output "cognito_client_id" {
 output "cognito_pool_id" {
   value = module.staging.cognito.cognito_pool_id
 }
+{{- end}}
diff --git a/templates/terraform/modules/ecr/main.tf b/templates/terraform/modules/ecr/main.tf
index 0f7780815..9cbd9ecf1 100644
--- a/templates/terraform/modules/ecr/main.tf
+++ b/templates/terraform/modules/ecr/main.tf
@@ -1,9 +1,43 @@
 resource "aws_ecr_repository" "ecr_repository" {
-  count = length(var.ecr_repositories)
-  name  = element(var.ecr_repositories, count.index)
+  for_each = var.ecr_repositories
+  name     = each.value
 
   tags = {
     environment = var.environment
   }
 }
 
+data "aws_iam_policy_document" "ecr_fullaccess" {
+
+  statement {
+    sid    = "FullAccess"
+    effect = "Allow"
+
+    principals {
+      type = "AWS"
+
+      identifiers = var.ecr_principals
+    }
+
+    actions = [
+      "ecr:GetAuthorizationToken",
+      "ecr:InitiateLayerUpload",
+      "ecr:UploadLayerPart",
+      "ecr:CompleteLayerUpload",
+      "ecr:PutImage",
+      "ecr:BatchCheckLayerAvailability",
+      "ecr:GetDownloadUrlForLayer",
+      "ecr:GetRepositoryPolicy",
+      "ecr:DescribeRepositories",
+      "ecr:ListImages",
+      "ecr:DescribeImages",
+      "ecr:BatchGetImage",
+    ]
+  }
+}
+
+resource "aws_ecr_repository_policy" "default" {
+  for_each   = aws_ecr_repository.ecr_repository
+  repository = each.value.name
+  policy     = data.aws_iam_policy_document.ecr_fullaccess.json
+}
diff --git a/templates/terraform/modules/ecr/variables.tf b/templates/terraform/modules/ecr/variables.tf
index 8fbb77252..67150990b 100644
--- a/templates/terraform/modules/ecr/variables.tf
+++ b/templates/terraform/modules/ecr/variables.tf
@@ -7,3 +7,8 @@ variable "ecr_repositories" {
   type = list(string)
 }
 
+variable "ecr_principals" {
+  description = "List of principals (most likely users) to give full access to the created ECR repositories"
+  type        = list(string)
+}
+
diff --git a/templates/terraform/modules/eks/main.tf b/templates/terraform/modules/eks/main.tf
index ef7f32ba4..c2164ef74 100644
--- a/templates/terraform/modules/eks/main.tf
+++ b/templates/terraform/modules/eks/main.tf
@@ -9,16 +9,18 @@ module "eks" {
   source  = "terraform-aws-modules/eks/aws"
   version = "6.0.2"
 
-  cluster_name    = var.project
+  cluster_name    = var.cluster_name
   cluster_version = "1.14"
   subnets         = var.private_subnets
   vpc_id          = var.vpc_id
 
   worker_groups = [
     {
-      instance_type = var.worker_instance_type
-      asg_max_size  = var.worker_asg_max_size
-      ami_id        = var.worker_ami
+      instance_type        = var.worker_instance_type
+      asg_min_size         = var.worker_asg_min_size
+      asg_desired_capacity = var.worker_asg_min_size
+      asg_max_size         = var.worker_asg_max_size
+      ami_id               = var.worker_ami
       tags = [{
         key   = "environment"
         value = var.environment
@@ -34,6 +36,8 @@ module "eks" {
       groups   = ["system:masters"]
     },
   ]
+  cluster_iam_role_name = "k8s-${var.cluster_name}-cluster"
+  workers_role_name     = "k8s-${var.cluster_name}-workers"
 
   # TODO, determine if this should be true/false
   manage_aws_auth = true
diff --git a/templates/terraform/modules/eks/variables.tf b/templates/terraform/modules/eks/variables.tf
index b68fdcd73..780b774ec 100644
--- a/templates/terraform/modules/eks/variables.tf
+++ b/templates/terraform/modules/eks/variables.tf
@@ -6,6 +6,10 @@ variable "environment" {
   description = "The environment (dev/staging/prod)"
 }
 
+variable "cluster_name" {
+  description = "Name to be given to the EKS cluster"
+}
+
 variable "assume_role_policy" {
   description = "IAM policy document for AssumeRole"
 }
@@ -23,6 +27,10 @@ variable "worker_instance_type" {
   description = "Instance type for the EKS workers"
 }
 
+variable "worker_asg_min_size" {
+  description = "Minimum number of instances for the EKS ASG"
+}
+
 variable "worker_asg_max_size" {
   description = "Maximum number of instances for the EKS ASG"
 }
diff --git a/templates/terraform/modules/environment/main.tf b/templates/terraform/modules/environment/main.tf
index 5bc06463a..231ca2f2c 100644
--- a/templates/terraform/modules/environment/main.tf
+++ b/templates/terraform/modules/environment/main.tf
@@ -1,10 +1,15 @@
 # Environment entrypoint
 
+locals {
+  kubernetes_cluster_name = "${var.project}-${var.environment}-${var.region}"
+}
+
 module "vpc" {
-  source      = "../../modules/vpc"
-  project     = var.project
-  environment = var.environment
-  region      = var.region
+  source                  = "../../modules/vpc"
+  project                 = var.project
+  environment             = var.environment
+  region                  = var.region
+  kubernetes_cluster_name = local.kubernetes_cluster_name
 }
 
 # Data sources for EKS IAM
@@ -21,18 +26,23 @@ data "aws_iam_policy_document" "assumerole_root_policy" {
   }
 }
 
+{{- if ne .Config.Infrastructure.AWS.EKS.ClusterName "" }}
 # Provision the EKS cluster
 module "eks" {
   source      = "../../modules/eks"
   project     = var.project
   environment = var.environment
+  cluster_name   = local.kubernetes_cluster_name
+  iam_account_id = data.aws_caller_identity.current.account_id
+
   assume_role_policy = data.aws_iam_policy_document.assumerole_root_policy.json
   private_subnets    = module.vpc.private_subnets
   vpc_id             = module.vpc.vpc_id
+
   worker_instance_type = var.eks_worker_instance_type
+  worker_asg_min_size  = var.eks_worker_asg_min_size
   worker_asg_max_size  = var.eks_worker_asg_max_size
   worker_ami           = var.eks_worker_ami # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
-
-  iam_account_id = data.aws_caller_identity.current.account_id
 }
 
 module "kube2iam" {
@@ -42,8 +52,23 @@ module "kube2iam" {
   eks_worker_iam_role_name = module.eks.worker_iam_role_name
   iam_account_id           = data.aws_caller_identity.current.account_id
 }
+{{- end}}
+
+data "aws_iam_user" "ci_user" {
+  user_name = "ci-user" # Should have been created in the bootstrap process
+}
+{{- if .Config.Services }}
 
-# {{ if .Config.Infrastructure.AWS.Cognito.Enabled }}
+# Set up ECR repositories
+module "ecr" {
+  source           = "../../modules/ecr"
+  environment      = var.environment
+  ecr_repositories = var.ecr_repositories
+  ecr_principals   = [data.aws_iam_user.ci_user.arn]
+}
+{{- end}}
+
+{{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
 module "cognito" {
   source    = "../../modules/cognito"
   user_pool = var.user_pool
@@ -53,11 +78,13 @@ module "cognito" {
 }
 
 output "cognito" {
   value = module.cognito
 }
-# {{- end}}
+{{- end}}
 
-# {{ if .Config.Infrastructure.AWS.S3Hosting.Enabled }}
+{{ if .Config.Infrastructure.AWS.S3Hosting.Enabled }}
 module "s3_hosting" {
-  source      = "../../modules/s3_hosting"
-  bucket_name = var.s3_hosting_bucket_name
+  source      = "../../modules/s3_hosting"
+  buckets     = var.s3_hosting_buckets
+  cert_domain = var.s3_hosting_cert_domain
+  project     = var.project
 }
-# {{- end}}
+{{- end}}
diff --git a/templates/terraform/modules/environment/variables.tf b/templates/terraform/modules/environment/variables.tf
index 660894412..2ff27eae7 100644
--- a/templates/terraform/modules/environment/variables.tf
+++ b/templates/terraform/modules/environment/variables.tf
@@ -24,6 +24,10 @@ variable "eks_worker_instance_type" {
   description = "Instance type for the EKS workers"
 }
 
+variable "eks_worker_asg_min_size" {
+  description = "Minimum number of instances for the EKS ASG"
+}
+
 variable "eks_worker_asg_max_size" {
   description = "Maximum number of instances for the EKS ASG"
 }
@@ -32,14 +36,18 @@ variable "eks_worker_ami" {
   description = "The (EKS-optimized) AMI for EKS worker instances"
 }
 
-variable "user_pool" {
-  description = "AWS Cognito pool name"
-}
-
+{{- if .Config.Infrastructure.AWS.Cognito.Enabled }}
 variable "hostname" {
   description = "Application hostname"
-}
+}
+{{- end }}
 
+variable "s3_hosting_buckets" {
+  description = "S3 hosting buckets"
+  type        = set(string)
+}
 
-variable "s3_hosting_bucket_name" {
-  description = "S3 hosting bucket name"
+variable "s3_hosting_cert_domain" {
+  description = "Domain of the ACM certificate to lookup for Cloudfront to use"
+  type        = string
 }
+
diff --git a/templates/terraform/modules/s3_hosting/main.tf b/templates/terraform/modules/s3_hosting/main.tf
index 2607f019b..f379e93d2 100644
--- a/templates/terraform/modules/s3_hosting/main.tf
+++ b/templates/terraform/modules/s3_hosting/main.tf
@@ -1,26 +1,14 @@
-resource "aws_s3_bucket" "www" {
-  // Our bucket's name is going to be the same as our site's domain name.
-  bucket = "${var.bucket_name}"
-  // Because we want our site to be available on the internet, we set this so
-  // anyone can read this bucket.
-  acl = "public-read"
-  // We also need to create a policy that allows anyone to view the content.
-  // This is basically duplicating what we did in the ACL but it's required by
-  // AWS. This post: http://amzn.to/2Fa04ul explains why.
-  policy = <