Added kubernetes specs
Shah Newaz Khan committed Oct 28, 2019
1 parent 516299b commit 245aed5
Showing 14 changed files with 667 additions and 9 deletions.
8 changes: 6 additions & 2 deletions cmd/generate.go
@@ -1,15 +1,16 @@
package cmd

import (

"log"
"sync"

"github.com/commitdev/commit0/internal/config"
"github.com/commitdev/commit0/internal/generate/docker"
"github.com/commitdev/commit0/internal/generate/golang"
"github.com/commitdev/commit0/internal/generate/http"
"github.com/commitdev/commit0/internal/generate/proto"
"github.com/commitdev/commit0/internal/generate/react"
"github.com/commitdev/commit0/internal/generate/kubernetes"
"github.com/commitdev/commit0/internal/templator"
"github.com/commitdev/commit0/internal/util"
"github.com/gobuffalo/packr/v2"
@@ -22,9 +23,10 @@ var language string
const (
Go = "go"
React = "react"
Kubernetes = "kubernetes"
)

var supportedLanguages = [...]string{Go, React}
var supportedLanguages = [...]string{Go, React, Kubernetes}

func init() {

@@ -59,6 +61,8 @@ var generateCmd = &cobra.Command{
docker.GenerateGoDockerCompose(t, cfg, &wg)
case React:
react.Generate(t, cfg, &wg)
case Kubernetes:
kubernetes.Generate(Templator, cfg)
}

util.TemplateFileIfDoesNotExist("", "README.md", t.Readme, &wg, cfg)
5 changes: 3 additions & 2 deletions go.mod
@@ -8,11 +8,12 @@ require (
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
github.com/k0kubun/pp v3.0.1+incompatible
github.com/mattn/go-colorable v0.1.2 // indirect
github.com/rogpeppe/go-internal v1.3.2 // indirect
github.com/rogpeppe/go-internal v1.5.0 // indirect
github.com/spf13/cobra v0.0.5
github.com/stretchr/testify v1.4.0 // indirect
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v2 v2.2.4
)
10 changes: 6 additions & 4 deletions go.sum
@@ -48,8 +48,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.2 h1:XU784Pr0wdahMY2bYcyK6N1KuaRAdLtqD4qd8D18Bfs=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.5.0 h1:Usqs0/lDK/NqTkvrmKSwA/3XkZAs7ZAW/eLeQ2MVBTw=
github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -74,6 +74,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -86,8 +88,8 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
8 changes: 7 additions & 1 deletion internal/config/config.go
@@ -55,7 +55,13 @@ type Commit0Config struct {
Maintainers []Maintainers `yaml:"maintainers"`
Network Network `yaml:"network"`
Services []Service `yaml:"services"`
React React `yaml:react`
React React `yaml:"react"`
Kubernetes Kubernetes `yaml:"kubernetes"`
}

type Kubernetes struct {
ClusterName string
DNSName string
}

func LoadConfig(filePath string) *Commit0Config {
13 changes: 13 additions & 0 deletions internal/generate/kubernetes/generate.go
@@ -0,0 +1,13 @@
package kubernetes

import (
//"github.com/commitdev/commit0/util"

"github.com/commitdev/commit0/config"
"github.com/commitdev/commit0/templator"
)

func Generate(templator *templator.Templator, config *config.Commit0Config) {
templator.Kubernetes.TemplateFiles(config, false)
}

29 changes: 29 additions & 0 deletions templates/kubernetes/teraform/README.md
@@ -0,0 +1,29 @@
# EKS Terraform

AWS Resources created:

- EKS Cluster: AWS-managed Kubernetes control plane (master servers)
- AutoScaling Group of 2 m4.large instances based on the latest EKS-optimized Amazon Linux 2 AMI: operator-managed Kubernetes worker nodes for running service deployments
- Associated VPC, Internet Gateway, Security Groups, and Subnets: operator-managed networking resources for the EKS cluster and worker node instances
- Associated IAM Roles and Policies: operator-managed access resources for EKS and the worker node instances

## Prerequisites

- Set up [AWS credentials](https://www.terraform.io/docs/providers/aws/index.html#environment-variables) for Terraform, e.g. via environment variables as shown below
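
For example, the credentials can be exported as environment variables before running Terraform; a minimal sketch with placeholder values:

```shell
# Placeholder values -- substitute your own IAM access key and preferred region
export AWS_ACCESS_KEY_ID="<your-access-key-id>"
export AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
export AWS_DEFAULT_REGION="us-west-2"
```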

## Spin up cluster

```shell
terraform init
terraform plan
terraform apply
```

### Connect to cluster

The EKS service does not provide an API parameter or resource that automatically configures the underlying Kubernetes cluster to let worker nodes join via AWS IAM role authentication, so the ConfigMap exposed by the `config_map_aws_auth` Terraform output must be applied manually (a combined sequence is sketched after the steps below):

- Run `aws eks update-kubeconfig --name staging` to configure `kubectl`
- Run `terraform output config_map_aws_auth` and save the output to a file, e.g. `config_map_aws_auth.yaml`
- Run `kubectl apply -f config_map_aws_auth.yaml`
- Verify that the worker nodes join the cluster with `kubectl get nodes --watch`
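
Taken together, a minimal sequence might look like the following (assuming the cluster is named `staging`, as above, and AWS credentials are already configured):

```shell
# Point kubectl at the new EKS cluster
aws eks update-kubeconfig --name staging

# Export the aws-auth ConfigMap rendered by Terraform and apply it
terraform output config_map_aws_auth > config_map_aws_auth.yaml
kubectl apply -f config_map_aws_auth.yaml

# Watch the worker nodes register with the control plane
kubectl get nodes --watch
```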
87 changes: 87 additions & 0 deletions templates/kubernetes/teraform/eks-cluster.tf
@@ -0,0 +1,87 @@
#
# EKS Cluster Resources
# * IAM Role to allow EKS service to manage other AWS services
# * EC2 Security Group to allow networking traffic with EKS cluster
# * EKS Cluster
#

resource "aws_iam_role" "demo-cluster" {
name = "terraform-eks-demo-cluster"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "demo-cluster-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.demo-cluster.name}"
}

resource "aws_iam_role_policy_attachment" "demo-cluster-AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.demo-cluster.name}"
}

resource "aws_security_group" "demo-cluster" {
name = "terraform-eks-demo-cluster"
description = "Cluster communication with worker nodes"
vpc_id = "${aws_vpc.demo.id}"

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

tags = {
Name = "terraform-eks-demo"
}
}

resource "aws_security_group_rule" "demo-cluster-ingress-node-https" {
description = "Allow pods to communicate with the cluster API Server"
from_port = 443
protocol = "tcp"
security_group_id = "${aws_security_group.demo-cluster.id}"
source_security_group_id = "${aws_security_group.demo-node.id}"
to_port = 443
type = "ingress"
}

resource "aws_security_group_rule" "demo-cluster-ingress-workstation-https" {
cidr_blocks = ["${local.workstation-external-cidr}"]
description = "Allow workstation to communicate with the cluster API Server"
from_port = 443
protocol = "tcp"
security_group_id = "${aws_security_group.demo-cluster.id}"
to_port = 443
type = "ingress"
}

resource "aws_eks_cluster" "demo" {
name = "${var.cluster-name}"
role_arn = "${aws_iam_role.demo-cluster.arn}"

vpc_config {
security_group_ids = ["${aws_security_group.demo-cluster.id}"]
subnet_ids = ["${aws_subnet.demo-0.id}","${aws_subnet.demo-1.id}"]
}

depends_on = [
"aws_iam_role_policy_attachment.demo-cluster-AmazonEKSClusterPolicy",
"aws_iam_role_policy_attachment.demo-cluster-AmazonEKSServicePolicy",
]
}
145 changes: 145 additions & 0 deletions templates/kubernetes/teraform/eks-worker-ndoes.tf
@@ -0,0 +1,145 @@
#
# EKS Worker Nodes Resources
# * IAM role allowing Kubernetes actions to access other AWS services
# * EC2 Security Group to allow networking traffic
# * Data source to fetch latest EKS worker AMI
# * AutoScaling Launch Configuration to configure worker instances
# * AutoScaling Group to launch worker instances
#

resource "aws_iam_role" "demo-node" {
name = "terraform-eks-demo-node"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "demo-node-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.demo-node.name}"
}

resource "aws_iam_role_policy_attachment" "demo-node-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.demo-node.name}"
}

resource "aws_iam_role_policy_attachment" "demo-node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.demo-node.name}"
}

resource "aws_iam_instance_profile" "demo-node" {
name = "terraform-eks-demo"
role = "${aws_iam_role.demo-node.name}"
}

resource "aws_security_group" "demo-node" {
name = "terraform-eks-demo-node"
description = "Security group for all nodes in the cluster"
vpc_id = "${aws_vpc.demo.id}"

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

tags = "${
map(
"Name", "terraform-eks-demo-node",
"kubernetes.io/cluster/${var.cluster-name}", "owned",
)
}"
}

resource "aws_security_group_rule" "demo-node-ingress-self" {
description = "Allow node to communicate with each other"
from_port = 0
protocol = "-1"
security_group_id = "${aws_security_group.demo-node.id}"
source_security_group_id = "${aws_security_group.demo-node.id}"
to_port = 65535
type = "ingress"
}

resource "aws_security_group_rule" "demo-node-ingress-cluster" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
protocol = "tcp"
security_group_id = "${aws_security_group.demo-node.id}"
source_security_group_id = "${aws_security_group.demo-cluster.id}"
to_port = 65535
type = "ingress"
}

data "aws_ami" "eks-worker" {
filter {
name = "name"
values = ["amazon-eks-node-${aws_eks_cluster.demo.version}-v*"]
}

most_recent = true
owners = ["602401143452"] # Amazon EKS AMI Account ID
}

# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
locals {
demo-node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.demo.endpoint}' --b64-cluster-ca '${aws_eks_cluster.demo.certificate_authority.0.data}' '${var.cluster-name}'
USERDATA
}

resource "aws_launch_configuration" "demo" {
associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.demo-node.name}"
image_id = "${data.aws_ami.eks-worker.id}"
instance_type = "m4.large"
name_prefix = "terraform-eks-demo"
security_groups = ["${aws_security_group.demo-node.id}"]
user_data_base64 = "${base64encode(local.demo-node-userdata)}"

lifecycle {
create_before_destroy = true
}
}

resource "aws_autoscaling_group" "demo" {
desired_capacity = 2
launch_configuration = "${aws_launch_configuration.demo.id}"
max_size = 2
min_size = 1
name = "terraform-eks-demo"
vpc_zone_identifier = ["${aws_subnet.demo-0.id}","${aws_subnet.demo-1.id}"]

tag {
key = "Name"
value = "terraform-eks-demo"
propagate_at_launch = true
}

tag {
key = "kubernetes.io/cluster/${var.cluster-name}"
value = "owned"
propagate_at_launch = true
}
}
