-
Notifications
You must be signed in to change notification settings - Fork 0
/
create.go
231 lines (181 loc) · 8.18 KB
/
create.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
package main
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/kubicorn/kubicorn/pkg/logger"
"github.com/weaveworks/eksctl/pkg/ami"
"github.com/weaveworks/eksctl/pkg/eks"
"github.com/weaveworks/eksctl/pkg/eks/api"
"github.com/weaveworks/eksctl/pkg/utils"
"github.com/weaveworks/eksctl/pkg/utils/kubeconfig"
)
// createCmd builds the top-level "create" command. When run without a
// subcommand it just prints its own help text; the real work lives in
// the registered subcommands (currently only "create cluster").
func createCmd() *cobra.Command {
	create := &cobra.Command{
		Use:   "create",
		Short: "Create resource(s)",
		Run: func(self *cobra.Command, _ []string) {
			self.Help()
		},
	}

	create.AddCommand(createClusterCmd())

	return create
}
// Defaults applied when the corresponding flag is left unset.
// NOTE(review): ALL_CAPS constant names are un-idiomatic Go (MixedCaps is
// preferred), but renaming them would touch every reference, so they are
// left unchanged here.
const (
	DEFAULT_NODE_COUNT     = 2                  // initial nodegroup size (--nodes)
	DEFAULT_NODE_TYPE      = "m5.large"         // node instance type (--node-type)
	DEFAULT_SSH_PUBLIC_KEY = "~/.ssh/id_rsa.pub" // local SSH public key path (--ssh-public-key)
)
// Package-level storage for flag values that are not part of
// api.ClusterConfig; they are read by doCreateCluster after flag parsing.
var (
	writeKubeconfig    bool     // --write-kubeconfig: toggle writing kubeconfig at all
	kubeconfigPath     string   // --kubeconfig: explicit output path
	autoKubeconfigPath bool     // --auto-kubeconfig: derive path from cluster name
	setContext         bool     // --set-kubeconfig-context: set current-context on write
	availabilityZones  []string // --zones: explicit AZ list (auto-selected when nil)
)
// createClusterCmd builds the "create cluster" subcommand and registers all
// of its flags. Flag values are bound either directly into the ClusterConfig
// passed on to doCreateCluster, or into the package-level kubeconfig/zone
// variables that doCreateCluster reads after parsing.
func createClusterCmd() *cobra.Command {
	cfg := &api.ClusterConfig{}

	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "Create a cluster",
		Run: func(_ *cobra.Command, args []string) {
			if err := doCreateCluster(cfg, getNameArg(args)); err != nil {
				logger.Critical("%s\n", err.Error())
				os.Exit(1)
			}
		},
	}

	fs := cmd.Flags()

	// Used only to show a realistic generated name in help text.
	exampleClusterName := utils.ClusterName("", "")

	fs.StringVarP(&cfg.ClusterName, "name", "n", "", fmt.Sprintf("EKS cluster name (generated if unspecified, e.g. %q)", exampleClusterName))
	fs.StringVarP(&cfg.Region, "region", "r", api.DEFAULT_EKS_REGION, "AWS region")
	fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)")
	fs.StringToStringVarP(&cfg.Tags, "tags", "", map[string]string{}, `A list of KV pairs used to tag the AWS resources (e.g. "Owner=John Doe,Team=Some Team")`)

	fs.StringVarP(&cfg.NodeType, "node-type", "t", DEFAULT_NODE_TYPE, "node instance type")
	fs.IntVarP(&cfg.Nodes, "nodes", "N", DEFAULT_NODE_COUNT, "total number of nodes (for a static ASG)")

	// TODO: https://github.com/weaveworks/eksctl/issues/28
	fs.IntVarP(&cfg.MinNodes, "nodes-min", "m", 0, "minimum nodes in ASG")
	fs.IntVarP(&cfg.MaxNodes, "nodes-max", "M", 0, "maximum nodes in ASG")

	fs.IntVar(&cfg.MaxPodsPerNode, "max-pods-per-node", 0, "maximum number of pods per node (set automatically if unspecified)")
	fs.StringSliceVar(&availabilityZones, "zones", nil, "(auto-select if unspecified)")

	fs.BoolVar(&cfg.NodeSSH, "ssh-access", false, "control SSH access for nodes")
	fs.StringVar(&cfg.SSHPublicKeyPath, "ssh-public-key", DEFAULT_SSH_PUBLIC_KEY, "SSH public key to use for nodes (import from local path, or use existing EC2 key pair)")

	fs.BoolVar(&writeKubeconfig, "write-kubeconfig", true, "toggle writing of kubeconfig")
	// BUGFIX: help text previously read "save kubconfig file".
	fs.BoolVar(&autoKubeconfigPath, "auto-kubeconfig", false, fmt.Sprintf("save kubeconfig file by cluster name, e.g. %q", kubeconfig.AutoPath(exampleClusterName)))
	fs.StringVar(&kubeconfigPath, "kubeconfig", kubeconfig.DefaultPath, "path to write kubeconfig (incompatible with --auto-kubeconfig)")
	fs.BoolVar(&setContext, "set-kubeconfig-context", true, "if true then current-context will be set in kubeconfig; if a context is already set then it will be overwritten")

	// "aws-api-timeout" is a hidden legacy alias for "timeout"; both bind the
	// same field, so whichever is parsed last wins.
	fs.DurationVar(&cfg.WaitTimeout, "aws-api-timeout", api.DefaultWaitTimeout, "")
	fs.MarkHidden("aws-api-timeout") // TODO deprecate in 0.2.0
	fs.DurationVar(&cfg.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations")

	fs.BoolVar(&cfg.Addons.WithIAM.PolicyAmazonEC2ContainerRegistryPowerUser, "full-ecr-access", false, "enable full access to ECR")
	fs.BoolVar(&cfg.Addons.Storage, "storage-class", true, "if true (default) then a default StorageClass of type gp2 provisioned by EBS will be created")

	fs.StringVar(&cfg.NodeAMI, "node-ami", ami.ResolverStatic, "Advanced use cases only. If 'static' is supplied (default) then eksctl will use static AMIs; if 'auto' is supplied then eksctl will automatically set the AMI based on region/instance type; if any other value is supplied it will override the AMI to use for the nodes. Use with extreme care.")

	return cmd
}
// doCreateCluster validates the effective configuration, then drives the
// full cluster creation flow: CloudFormation stacks for the control plane
// and the initial nodegroup, kubeconfig write-out, control-plane and node
// readiness waits, and post-creation add-ons. Returns the first fatal error
// encountered; client-tooling check failures are logged but non-fatal.
func doCreateCluster(cfg *api.ClusterConfig, name string) error {
	ctl := eks.New(cfg)

	// Fail fast if AWS credentials are unusable.
	if err := ctl.CheckAuth(); err != nil {
		return err
	}

	// utils.ClusterName merges the --name flag with the positional argument;
	// an empty result signals the two were supplied in conflict
	// (presumably both non-empty and different — TODO confirm against utils).
	if utils.ClusterName(cfg.ClusterName, name) == "" {
		return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name)
	}
	cfg.ClusterName = utils.ClusterName(cfg.ClusterName, name)

	if autoKubeconfigPath {
		// --auto-kubeconfig derives the path from the cluster name and is
		// therefore mutually exclusive with an explicit --kubeconfig.
		if kubeconfigPath != kubeconfig.DefaultPath {
			return fmt.Errorf("--kubeconfig and --auto-kubeconfig cannot be used at the same time")
		}
		kubeconfigPath = kubeconfig.AutoPath(cfg.ClusterName)
	}

	if cfg.SSHPublicKeyPath == "" {
		return fmt.Errorf("--ssh-public-key must be non-empty string")
	}

	// Hard-coded allow-list of the regions EKS supported when this was written.
	if cfg.Region != api.EKS_REGION_US_WEST_2 && cfg.Region != api.EKS_REGION_US_EAST_1 && cfg.Region != api.EKS_REGION_EU_WEST_1 {
		return fmt.Errorf("--region=%s is not supported only %s, %s and %s are supported", cfg.Region, api.EKS_REGION_US_WEST_2, api.EKS_REGION_US_EAST_1, api.EKS_REGION_EU_WEST_1)
	}

	if err := ctl.SetAvailabilityZones(availabilityZones); err != nil {
		return err
	}

	// Resolve the node AMI according to --node-ami (static/auto/explicit).
	if err := ctl.EnsureAMI(); err != nil {
		return err
	}

	if err := ctl.LoadSSHPublicKey(); err != nil {
		return err
	}

	logger.Debug("cfg = %#v", cfg)

	logger.Info("creating EKS cluster %q in %q region", cfg.ClusterName, cfg.Region)

	{ // core action: create both CloudFormation stacks
		stackManager := ctl.NewStackManager()
		logger.Info("will create 2 separate CloudFormation stacks for cluster itself and the initial nodegroup")
		logger.Info("if you encounter any issues, check CloudFormation console or try 'eksctl utils describe-stacks --region=%s --name=%s'", cfg.Region, cfg.ClusterName)
		errs := stackManager.CreateClusterWithInitialNodeGroup()
		// read any errors (it only gets non-nil errors)
		if len(errs) > 0 {
			logger.Info("%d error(s) occurred and cluster hasn't been created properly, you may wish to check CloudFormation console", len(errs))
			logger.Info("to cleanup resources, run 'eksctl delete cluster --region=%s --name=%s'", cfg.Region, cfg.ClusterName)
			for _, err := range errs {
				logger.Critical("%s\n", err.Error())
			}
			return fmt.Errorf("failed to create cluster %q", cfg.ClusterName)
		}
	}

	// NOTE(review): message grammar is off ("resource ... had been created"
	// should be "resources ... have been created") — left unchanged here.
	logger.Success("all EKS cluster resource for %q had been created", cfg.ClusterName)

	// obtain cluster credentials, write kubeconfig
	{ // post-creation action
		clientConfigBase, err := ctl.NewClientConfig()
		if err != nil {
			return err
		}

		if writeKubeconfig {
			config := clientConfigBase.WithExecAuthenticator()
			kubeconfigPath, err = kubeconfig.Write(kubeconfigPath, config.Client, setContext)
			if err != nil {
				return errors.Wrap(err, "writing kubeconfig")
			}
			logger.Success("saved kubeconfig as %q", kubeconfigPath)
		} else {
			// clear the path so CheckAllCommands below knows nothing was written
			kubeconfigPath = ""
		}

		// create Kubernetes client
		clientSet, err := clientConfigBase.NewClientSetWithEmbeddedToken()
		if err != nil {
			return err
		}

		if err := ctl.WaitForControlPlane(clientSet); err != nil {
			return err
		}

		// authorise nodes to join
		if err := ctl.CreateDefaultNodeGroupAuthConfigMap(clientSet); err != nil {
			return err
		}

		// wait for nodes to join
		if err := ctl.WaitForNodes(clientSet); err != nil {
			return err
		}

		// add default storage class (enabled by default via --storage-class)
		if cfg.Addons.Storage == true {
			if err := ctl.AddDefaultStorageClass(clientSet); err != nil {
				return err
			}
		}

		// check kubectl version, and offer install instructions if missing or old
		// also check heptio-authenticator
		// TODO: https://github.com/weaveworks/eksctl/issues/30
		env, err := ctl.GetCredentialsEnv()
		if err != nil {
			return err
		}
		// Deliberately non-fatal: the cluster is up even if local client
		// binaries are missing or misconfigured.
		if err := utils.CheckAllCommands(kubeconfigPath, setContext, clientConfigBase.ContextName, env); err != nil {
			logger.Critical("%s\n", err.Error())
			logger.Info("cluster should be functional despite missing (or misconfigured) client binaries")
		}

		// If GPU instance type, give instructions
		if utils.IsGPUInstanceType(cfg.NodeType) {
			logger.Info("as you are using a GPU optimized instance type you will need to install NVIDIA Kubernetes device plugin.")
			logger.Info("\t see the following page for instructions: https://github.com/NVIDIA/k8s-device-plugin")
		}
	}

	logger.Success("EKS cluster %q in %q region is ready", cfg.ClusterName, cfg.Region)

	return nil
}