Commit

Various fixes and improvements (#5)
- Use the new EKS bootstrap script.
- Add the ability to attach a public key to EC2 worker nodes.
- Attach a root volume to worker nodes with a configurable size that
  defaults to 20GiB.
- Incorporate parent names in all child resources.
- Add an ingress rule that accommodates extension API servers.
- Use CloudFormation to set up the ASG in order to get rolling updates
  on launch configuration changes.
- Fix the type token for `ServiceRole`.
pgavlin committed Sep 11, 2018
1 parent 39e1460 commit c6cfed8
Showing 7 changed files with 234 additions and 89 deletions.
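
The bullets above mostly change the surface of `ClusterOptions`. As a point of reference, here is a minimal consumer-side sketch of the new worker-node options — hedged assumptions: the component is consumed as `Cluster` from this package (e.g. `@pulumi/eks`), and VPC/subnet options are left to their defaults; option names follow the `ClusterOptions` diff in nodejs/eks/cluster.ts below.

```typescript
import * as eks from "@pulumi/eks";

// Hypothetical consumer program illustrating the options touched by this commit.
const cluster = new eks.Cluster("example", {
    instanceType: "t2.medium",
    desiredCapacity: 2,
    minSize: 1,
    maxSize: 2,
    storageClasses: "gp2",                      // single default EBS-backed class
    nodePublicKey: "ssh-rsa AAAA... user@host", // new: creates an EC2 KeyPair for the workers
    nodeRootVolumeSize: 40,                     // new: root volume size in GiB (defaults to 20)
});

// The kubeconfig is gated on the CloudFormation-managed node group, so consumers
// don't talk to the cluster before its workers have joined.
export const kubeconfig = cluster.kubeconfig;
```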
4 changes: 2 additions & 2 deletions Makefile
@@ -10,6 +10,6 @@ publish_packages:
# The travis_* targets are entrypoints for CI.
.PHONY: travis_cron travis_push travis_pull_request travis_api
travis_cron: all
travis_push: only_build only_test_fast publish_packages
travis_pull_request: only_build only_test_fast
travis_push: all publish_packages
travis_pull_request: all
travis_api: all
172 changes: 112 additions & 60 deletions nodejs/eks/cluster.ts
@@ -15,11 +15,13 @@
import * as aws from "@pulumi/aws";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";
import * as crypto from "crypto";
import * as fs from "fs";
import * as path from "path";

import { ServiceRole } from "./servicerole";
import { createStorageClass, EBSVolumeType, StorageClass } from "./storageclass";
import transform from "./transform";

/**
* ClusterOptions describes the configuration options accepted by an EKSCluster component.
@@ -42,6 +44,18 @@ export interface ClusterOptions {
*/
instanceType?: pulumi.Input<aws.ec2.InstanceType>;

/**
* Public key material for SSH access to worker nodes. See allowed formats at:
* https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
* If not provided, no SSH access is enabled on VMs.
*/
nodePublicKey?: pulumi.Input<string>;

/**
* The size in GiB of a cluster node's root volume. Defaults to 20.
*/
nodeRootVolumeSize?: pulumi.Input<number>;

/**
* The number of worker nodes that should be running in the cluster. Defaults to 2.
*/
@@ -108,6 +122,16 @@ export class Cluster extends pulumi.ComponentResource {
*/
public readonly provider: k8s.Provider;

/**
* The security group for the EKS cluster.
*/
public readonly clusterSecurityGroup: aws.ec2.SecurityGroup;

/**
* The security group for the cluster's nodes.
*/
public readonly nodeSecurityGroup: aws.ec2.SecurityGroup;

/**
* Create a new EKS cluster with worker nodes, optional storage classes, and deploy the Kubernetes Dashboard if
* requested.
@@ -132,7 +156,7 @@ }
}

// Create the EKS service role
const eksRole = new ServiceRole("eksRole", {
const eksRole = new ServiceRole(`${name}-eksRole`, {
service: "eks.amazonaws.com",
description: "Allows EKS to manage clusters on your behalf.",
managedPolicyArns: [
@@ -149,19 +173,20 @@
protocol: "-1", // all
cidrBlocks: [ "0.0.0.0/0" ],
};
const eksClusterSecurityGroup = new aws.ec2.SecurityGroup("eksClusterSecurityGroup", {
const eksClusterSecurityGroup = new aws.ec2.SecurityGroup(`${name}-eksClusterSecurityGroup`, {
vpcId: vpcId,
egress: [ allEgress ],
}, { parent: this });
this.clusterSecurityGroup = eksClusterSecurityGroup;

// Create the EKS cluster
const eksCluster = new aws.eks.Cluster("eksCluster", {
const eksCluster = new aws.eks.Cluster(`${name}-eksCluster`, {
roleArn: eksRole.role.apply(r => r.arn),
vpcConfig: { securityGroupIds: [ eksClusterSecurityGroup.id ], subnetIds: subnetIds },
}, { parent: this });

// Create the instance role we'll use for worker nodes.
const instanceRole = new ServiceRole("instanceRole", {
const instanceRole = new ServiceRole(`${name}-instanceRole`, {
service: "ec2.amazonaws.com",
managedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
@@ -209,12 +234,12 @@

// Create the Kubernetes provider we'll use to manage the config map we need to allow worker nodes to access
// the EKS cluster.
const k8sProvider = new k8s.Provider("eks-k8s", {
const k8sProvider = new k8s.Provider(`${name}-eks-k8s`, {
kubeconfig: myKubeconfig.apply(JSON.stringify),
}, { parent: this });

// Enable access to the EKS cluster for worker nodes.
const eksNodeAccess = new k8s.core.v1.ConfigMap("nodeAccess", {
const eksNodeAccess = new k8s.core.v1.ConfigMap(`${name}-nodeAccess`, {
apiVersion: "v1",
metadata: {
name: "aws-auth",
@@ -229,18 +254,18 @@
const storageClasses = args.storageClasses || "gp2";
if (typeof storageClasses === "string") {
const storageClass = { type: storageClasses, default: true };
createStorageClass(storageClasses, storageClass, { parent: this, provider: k8sProvider });
createStorageClass(`${name.toLowerCase()}-${storageClasses}`, storageClass, { parent: this, provider: k8sProvider });
} else {
for (const key of Object.keys(storageClasses)) {
createStorageClass(key, storageClasses[key], { parent: this, provider: k8sProvider });
createStorageClass(`${name.toLowerCase()}-${key}`, storageClasses[key], { parent: this, provider: k8sProvider });
}
}

// Create the cluster's worker nodes.
const instanceProfile = new aws.iam.InstanceProfile("instanceProfile", {
const instanceProfile = new aws.iam.InstanceProfile(`${name}-instanceProfile`, {
role: instanceRole.role,
}, { parent: this });
const instanceSecurityGroup = new aws.ec2.SecurityGroup("instanceSecurityGroup", {
const nodeSecurityGroup = new aws.ec2.SecurityGroup(`${name}-nodeSecurityGroup`, {
vpcId: vpcId,
ingress: [
{
@@ -257,47 +282,50 @@
protocol: "tcp",
securityGroups: [ eksClusterSecurityGroup.id ],
},
{
description: "Allow pods running extension API servers on port 443 to receive communication from cluster control plane",
fromPort: 443,
toPort: 443,
protocol: "tcp",
securityGroups: [ eksClusterSecurityGroup.id ],
},
],
egress: [ allEgress ],
tags: eksCluster.name.apply(n => <aws.Tags>{
[`kubernetes.io/cluster/${n}`]: "owned",
}),
}, { parent: this });
const eksClusterIngressRule = new aws.ec2.SecurityGroupRule("eksClusterIngressRule", {
const eksClusterIngressRule = new aws.ec2.SecurityGroupRule(`${name}-eksClusterIngressRule`, {
description: "Allow pods to communicate with the cluster API Server",
type: "ingress",
fromPort: 443,
toPort: 443,
protocol: "tcp",
securityGroupId: eksClusterSecurityGroup.id,
sourceSecurityGroupId: instanceSecurityGroup.id,
sourceSecurityGroupId: nodeSecurityGroup.id,
}, { parent: this });
const instanceSecurityGroupId = pulumi.all([instanceSecurityGroup.id, eksClusterIngressRule.id])
const nodeSecurityGroupId = pulumi.all([nodeSecurityGroup.id, eksClusterIngressRule.id])
.apply(([id]) => id);
this.nodeSecurityGroup = nodeSecurityGroup;

// If requested, add a new EC2 KeyPair for SSH access to the instances.
let keyName: pulumi.Output<string> | undefined;
if (args.nodePublicKey) {
const key = new aws.ec2.KeyPair(`${name}-keyPair`, {
publicKey: args.nodePublicKey,
}, { parent: this });
keyName = key.keyName;
}

const cfnStackName = transform(`${name}-cfnStackName`, name, n => `${n}-${crypto.randomBytes(4).toString("hex")}`, { parent: this });

const awsRegion = pulumi.output(aws.getRegion({}, { parent: this }));
const userdata = pulumi.all([awsRegion, eksCluster.name, eksCluster.endpoint, eksCluster.certificateAuthority])
.apply(([region, clusterName, clusterEndpoint, clusterCertificateAuthority]) => {
return `#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${clusterCertificateAuthority.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${clusterEndpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${clusterName},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${region.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${clusterEndpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet kube-proxy
const userdata = pulumi.all([awsRegion, eksCluster.name, eksCluster.endpoint, eksCluster.certificateAuthority, cfnStackName])
.apply(([region, clusterName, clusterEndpoint, clusterCa, stackName]) => {
return `#!/bin/bash
/etc/eks/bootstrap.sh --apiserver-endpoint "${clusterEndpoint}" --b64-cluster-ca "${clusterCa.data}" "${clusterName}"
/opt/aws/bin/cfn-signal --exit-code $? --stack ${stackName} --resource NodeGroup --region ${region.name}
`;
});
const eksWorkerAmi = aws.getAmi({
@@ -308,41 +336,65 @@ systemctl restart kubelet kube-proxy
mostRecent: true,
owners: [ "602401143452" ], // Amazon
}, { parent: this });
const instanceLaunchConfiguration = new aws.ec2.LaunchConfiguration("instanceLaunchConfiguration", {
const nodeLaunchConfiguration = new aws.ec2.LaunchConfiguration(`${name}-nodeLaunchConfiguration`, {
associatePublicIpAddress: true,
imageId: eksWorkerAmi.then(r => r.imageId),
instanceType: args.instanceType || "t2.medium",
iamInstanceProfile: instanceProfile.id,
securityGroups: [ instanceSecurityGroupId ],
keyName: keyName,
securityGroups: [ nodeSecurityGroupId ],
rootBlockDevice: {
volumeSize: args.nodeRootVolumeSize || 20, // GiB
volumeType: "gp2", // default is "standard"
deleteOnTermination: true,
},
userData: userdata,
}, { parent: this });
const autoscalingGroup = new aws.autoscaling.Group("autoscalingGroup", {
desiredCapacity: args.desiredCapacity || 2,
launchConfiguration: instanceLaunchConfiguration.id,
maxSize: args.maxSize || 2,
minSize: args.minSize || 1,
vpcZoneIdentifiers: subnetIds,
tags: [
{
key: eksCluster.name.apply(n => `kubernetes.io/cluster/${n}`),
value: "owned",
propagateAtLaunch: true,
},
{
key: "Name",
value: eksCluster.name.apply(n => `${n}-worker`),
propagateAtLaunch: true,
},
],

const cfnSubnetIds = pulumi.output(subnetIds).apply(ids => pulumi.all(ids.map(pulumi.output))).apply(ids => JSON.stringify(ids));
const cfnTemplateBody = pulumi.all([
nodeLaunchConfiguration.id,
args.desiredCapacity || 2,
args.minSize || 1,
args.maxSize || 2,
eksCluster.name,
cfnSubnetIds,
]).apply(([launchConfig, desiredCapacity, minSize, maxSize, clusterName, vpcSubnetIds]) => `
AWSTemplateFormatVersion: '2010-09-09'
Resources:
NodeGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
DesiredCapacity: ${desiredCapacity}
LaunchConfigurationName: ${launchConfig}
MinSize: ${minSize}
MaxSize: ${maxSize}
VPCZoneIdentifier: ${vpcSubnetIds}
Tags:
- Key: Name
Value: ${clusterName}-worker
PropagateAtLaunch: 'true'
- Key: kubernetes.io/cluster/${clusterName}
Value: 'owned'
PropagateAtLaunch: 'true'
UpdatePolicy:
AutoScalingRollingUpdate:
MinInstancesInService: '1'
MaxBatchSize: '1'
`);

const cfnStack = new aws.cloudformation.Stack(`${name}-nodes`, {
name: cfnStackName,
templateBody: cfnTemplateBody,
}, { parent: this, dependsOn: eksNodeAccess });

// Export the cluster's kubeconfig with a dependency upon the cluster's autoscaling group. This will help
// ensure that the cluster's consumers do not attempt to use the cluster until its workers are attached.
this.kubeconfig = pulumi.all([autoscalingGroup.id, myKubeconfig]).apply(([_, kubeconfig]) => kubeconfig);
this.kubeconfig = pulumi.all([cfnStack.id, myKubeconfig]).apply(([_, kubeconfig]) => kubeconfig);

// Export a k8s provider with the above kubeconfig. Note that we do not export the provider we created earlier
// in order to help ensure that worker nodes are available before the provider can be used.
this.provider = new k8s.Provider("provider", {
this.provider = new k8s.Provider(`${name}-provider`, {
kubeconfig: this.kubeconfig.apply(JSON.stringify),
}, { parent: this });

@@ -355,20 +407,20 @@
path.join(__dirname, "dashboard", "influxdb.yaml"),
path.join(__dirname, "dashboard", "heapster-rbac.yaml"),
].map(filePath => fs.readFileSync(filePath).toString());
const dashboard = new k8s.yaml.ConfigGroup("dashboard", {
const dashboard = new k8s.yaml.ConfigGroup(`${name}-dashboard`, {
yaml: dashboardYaml,
}, { parent: this, providers: { kubernetes: this.provider } });

// Create a service account for admin access.
const adminAccount = new k8s.core.v1.ServiceAccount("eks-admin", {
const adminAccount = new k8s.core.v1.ServiceAccount(`${name}-eks-admin`, {
metadata: {
name: "eks-admin",
namespace: "kube-system",
},
}, { parent: this, provider: this.provider });

// Create a role binding for the admin account.
const adminRoleBinding = new k8s.rbac.v1.ClusterRoleBinding("eks-admin", {
const adminRoleBinding = new k8s.rbac.v1.ClusterRoleBinding(`${name}-eks-admin`, {
metadata: {
name: "eks-admin",
},
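
Because `clusterSecurityGroup` and `nodeSecurityGroup` are now exported from the component, consumers can attach their own rules — for example, opening SSH to the workers once `nodePublicKey` is set. A hedged sketch (hypothetical resource names and CIDR; the rule shape mirrors `eksClusterIngressRule` in the diff above):

```typescript
import * as aws from "@pulumi/aws";
import * as eks from "@pulumi/eks";

const cluster = new eks.Cluster("example", {
    nodePublicKey: "ssh-rsa AAAA... user@host",
});

// Allow SSH to the worker nodes from an admin network.
const sshIngress = new aws.ec2.SecurityGroupRule("example-ssh", {
    type: "ingress",
    fromPort: 22,
    toPort: 22,
    protocol: "tcp",
    cidrBlocks: ["203.0.113.0/24"],            // placeholder admin CIDR
    securityGroupId: cluster.nodeSecurityGroup.id,
});
```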
6 changes: 3 additions & 3 deletions nodejs/eks/package.json
@@ -11,9 +11,9 @@
"homepage": "https://pulumi.io",
"repository": "https://github.com/pulumi/eks",
"dependencies": {
"@pulumi/aws": "^0.15.0",
"@pulumi/kubernetes": "^0.15.2-dev",
"@pulumi/pulumi": "^0.15.0"
"@pulumi/aws": "^0.15.1",
"@pulumi/kubernetes": "^v0.17.0-rc1",
"@pulumi/pulumi": "^0.15.1"
},
"devDependencies": {
"@types/node": "^8.0.26",
2 changes: 1 addition & 1 deletion nodejs/eks/servicerole.ts
@@ -58,7 +58,7 @@ export class ServiceRole extends pulumi.ComponentResource {
* @param opts A bag of options that control this component's behavior.
*/
constructor(name: string, args: ServiceRoleArgs, opts?: pulumi.ResourceOptions) {
super("ServiceRole", name, args, opts);
super("eks:index:ServiceRole", name, args, opts);

const assumeRolePolicy = pulumi.output(args.service).apply(service => JSON.stringify({
Version: "2012-10-17",
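
The one-line change above brings `ServiceRole` in line with Pulumi's `package:module:Type` token convention used elsewhere in this repo. For illustration only, a hypothetical component registering a well-formed token:

```typescript
import * as pulumi from "@pulumi/pulumi";

class Widget extends pulumi.ComponentResource {
    constructor(name: string, opts?: pulumi.ResourceOptions) {
        // "eks" is the package, "index" the module, "Widget" the type.
        super("eks:index:Widget", name, {}, opts);
        // ... child resources would be created here with { parent: this } ...
        this.registerOutputs({});
    }
}
```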
1 change: 0 additions & 1 deletion nodejs/eks/storageclass.ts
@@ -99,7 +99,6 @@ export function createStorageClass(name: string, storageClass: StorageClass, opt
// Compute the storage class's metadata, including its name and default storage class annotation.
const metadata = pulumi.all([storageClass.metadata || {}, storageClass.default])
.apply(([m, isDefault]) => {
m.name = m.name || name;
if (isDefault) {
m.annotations = { ...m.annotations || {}, "storageclass.kubernetes.io/is-default-class": "true" };
}
Expand Down
49 changes: 49 additions & 0 deletions nodejs/eks/transform.ts
@@ -0,0 +1,49 @@
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import * as pulumi from "@pulumi/pulumi";
import * as dynamic from "@pulumi/pulumi/dynamic";

/**
* Transform is a dynamic resource that evaluates a function when its inputs change and exports the result.
*/
class Transform<T, U> extends dynamic.Resource {
public readonly output: pulumi.Output<U>;

constructor(name: string, input: T, func: (v: T) => U, opts?: pulumi.CustomResourceOptions) {
const provider = {
check: (olds: any, news: any) => Promise.resolve({ inputs: news, failedChecks: [] }),
diff: (id: pulumi.ID, olds: any, news: any) => Promise.resolve({}),
create: (inputs: any) => Promise.resolve({
id: name,
outs: { output: func(inputs.input as T) },
}),
update: (id: pulumi.ID, olds: any, news: any) => Promise.resolve({
outs: { output: func(news.input as T) },
}),
read: (id: pulumi.ID, state: any) => Promise.resolve({id: id, props: state}),
delete: (id: pulumi.ID, props: any) => Promise.resolve(),
};

super(provider, name, { input: input, output: undefined }, opts);
}
}

/**
* transform evaluates the given function on the given input iff the input's value has changed and returns the result
* as an output.
*/
export default function transform<T, U>(name: string, input: T, func: (v: T) => U, opts?: pulumi.CustomResourceOptions): pulumi.Output<U> {
return (new Transform(name, input, func, opts)).output;
}
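
`transform` is what cluster.ts now uses to derive the CloudFormation stack name: the callback only re-runs when its input changes, so the random suffix stays stable across updates that don't rename the cluster. A self-contained usage sketch (assumed to run inside a Pulumi program):

```typescript
import * as crypto from "crypto";
import transform from "./transform";

const baseName = "my-cluster";

// Re-evaluated only when `baseName` changes; otherwise the previously computed
// suffix is kept in the dynamic resource's stored state.
const stackName = transform(`${baseName}-cfnStackName`, baseName,
    n => `${n}-${crypto.randomBytes(4).toString("hex")}`);

export const resolvedStackName = stackName; // pulumi.Output<string>
```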