/
provisioner.go
206 lines (174 loc) · 5.46 KB
/
provisioner.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
package vsphere
import (
	"bytes"
	"fmt"
	"sort"
	"strings"
	"text/template"

	"github.com/kraken/terraformer"
	"github.com/liferaft/kubekit/pkg/provisioner/utils"
	vtemplate "github.com/terraform-providers/terraform-provider-template/template"
	"github.com/terraform-providers/terraform-provider-vsphere/vsphere"
)
// ResourceTemplates maps resource names to the content of the corresponding
// Terraform resource template. It is populated by generated code in code.go
// and consumed by Code(), which concatenates and renders every entry.
var ResourceTemplates map[string]string
// BeProvisioner sets up the Platform to act as a Provisioner by attaching a
// terraformer instance configured with the rendered code and variables.
// It is a no-op when the platform is already a provisioner.
func (p *Platform) BeProvisioner(state *terraformer.State) error {
	// Already provisioned with a terraformer: nothing to do.
	if p.t != nil {
		return nil
	}

	vars := p.Variables()
	code := p.Code()

	tf, err := utils.NewTerraformer(code, vars, state, p.config.ClusterName, "vSphere", p.ui)
	if err != nil {
		return err
	}

	// Register the providers the generated Terraform code depends on.
	tf.AddProvider("vsphere", vsphere.Provider())
	tf.AddProvider("template", vtemplate.Provider())

	p.t = tf
	return nil
}
// Plan does the planning of the changes either to create or destroy the
// cluster on this platform. It fails if the platform has not been made a
// provisioner yet (see BeProvisioner).
func (p *Platform) Plan(destroy bool) (plan *terraformer.Plan, err error) {
	if p.t == nil {
		// fix: error message previously misspelled "platform" as "plaftorm"
		return nil, fmt.Errorf("cannot get the plan, the %s platform is not a provisioner yet", p.name)
	}
	p.ui.Log.Debug("getting the cluster plan before apply it")
	return p.t.Plan(destroy)
}
// Apply applies the changes either to create (destroy == false) or destroy
// (destroy == true) the cluster on this platform. It fails if the platform
// has not been made a provisioner yet (see BeProvisioner).
func (p *Platform) Apply(destroy bool) error {
	if p.t == nil {
		// fix: error message previously misspelled "platform" as "plaftorm"
		return fmt.Errorf("cannot apply the changes, the %s platform is not a provisioner yet", p.name)
	}
	if !destroy {
		p.ui.Log.Debug("starting to provision the cluster")
	} else {
		p.ui.Log.Debug("starting to terminate the cluster")
	}
	return p.t.Apply(destroy)
}
// Provision provisions or creates a cluster on this platform.
// It is equivalent to Apply(false) and fails if the platform has not been
// made a provisioner yet (see BeProvisioner).
func (p *Platform) Provision() error {
	if p.t == nil {
		// fix: error message previously misspelled "platform" as "plaftorm"
		return fmt.Errorf("cannot provision the cluster, the %s platform is not a provisioner yet", p.name)
	}
	return p.t.Apply(false)
}
// Terminate terminates or destroys a cluster on this platform.
// It is equivalent to Apply(true) and fails if the platform has not been
// made a provisioner yet (see BeProvisioner).
func (p *Platform) Terminate() error {
	if p.t == nil {
		// fix: error message previously misspelled "platform" as "plaftorm"
		return fmt.Errorf("cannot terminate the cluster, the %s platform is not a provisioner yet", p.name)
	}
	return p.t.Apply(true)
}
// Code returns the rendered Terraform code to execute.
//
// Every registered resource template is concatenated into one document,
// parsed as a single Go template, and executed against a defaults-merged
// copy of the platform configuration. Template errors are returned as the
// code bytes so they surface to the caller. If the platform is already a
// provisioner, the rendered code is also stored on the terraformer.
func (p *Platform) Code() []byte {
	var templateContent bytes.Buffer
	var renderedContent bytes.Buffer

	// fix: iterate templates in sorted key order so the generated code is
	// deterministic (map iteration order is randomized in Go).
	keys := make([]string, 0, len(ResourceTemplates))
	for k := range ResourceTemplates {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		templateContent.WriteString(fmt.Sprintf("# section created from template %s\n\n%s\n", k, ResourceTemplates[k]))
	}

	// Helper functions available inside the templates.
	tmplFuncMap := template.FuncMap{
		"Dash":  func(s string) string { return strings.NewReplacer("_", "-", ".", "-").Replace(s) },
		"Lower": func(s string) string { return strings.ToLower(s) },
		"QuoteList": func(s []string) string {
			return fmt.Sprintf(`"%s"`, strings.Join(s, `","`))
		},
		"Trim": strings.TrimSpace,
		// MasterPool finds the master node pool, preferring the pool
		// labeled with the Kubernetes master role.
		"MasterPool": func(pools map[string]NodePool) NodePool {
			// master lookup by label
			for _, pool := range pools {
				for _, label := range pool.KubeletNodeLabels {
					if label == `node-role.kubernetes.io/master=""` {
						return pool
					}
				}
			}
			// fall back, check for named "master" even if label is incorrect
			if master, ok := pools["master"]; ok {
				return master
			}
			// return a default master pool, as it will likely be used just for the name
			return NodePool{
				Name:  "master",
				Count: 1,
			}
		},
		// Count yields [0, 1, ..., count-1] for template range loops.
		"Count": func(count int) []int {
			var counter []int
			for i := 0; i < count; i++ {
				counter = append(counter, i)
			}
			return counter
		},
	}

	resourceTpl, err := template.
		New("vsphere").
		Option("missingkey=error").
		Funcs(tmplFuncMap).
		Parse(templateContent.String())
	if err != nil {
		// fix: the failing call is Parse(), not New(); report it accurately
		return []byte(fmt.Sprintf("failed at resourceTpl.Parse() with %s", err))
	}

	// Reload config with the default node pool merged in. Must not alter the
	// original config because it is written back to config.yaml.
	copied := p.config.copyWithDefaults()

	// fix: reconcileVersion already performs the legacy (version "1.0")
	// node-pool renaming; the previous inline duplicate of that switch has
	// been removed (it re-applied the same idempotent transformation).
	p.reconcileVersion(&copied)

	err = resourceTpl.Execute(&renderedContent, copied)
	if err != nil {
		return []byte(fmt.Sprintf("failed at resourceTpl.Execute() with %s\nmap contained: %v", err, p.config))
	}

	if p.t != nil {
		p.t.Code = renderedContent.Bytes()
	}
	return renderedContent.Bytes()
}
// Variables returns the variables as a map keyed by variable name.
// Note: Variables has been reduced to sensitive data fields such as
// credentials; all other values are rendered directly from Config.
func (p *Platform) Variables() map[string]interface{} {
	vars := make(map[string]interface{}, 3)
	vars["vsphere_username"] = p.config.VsphereUsername
	vars["vsphere_password"] = p.config.VspherePassword
	vars["vsphere_server"] = p.config.VsphereServer
	return vars
}
// reconcileVersion adjusts the node pools in config for legacy platform
// versions. For version "1.0" the pools keyed "master" and "worker" get a
// "dumb-" name prefix; other versions are left untouched.
func (p *Platform) reconcileVersion(config *Config) {
	if p.version != "1.0" {
		// nothing to reconcile for other versions for now
		return
	}
	renamed := make(map[string]NodePool, len(config.NodePools))
	for key, pool := range config.NodePools {
		if key == "master" || key == "worker" {
			pool.Name = fmt.Sprintf("dumb-%s", key)
		}
		renamed[key] = pool
	}
	config.NodePools = renamed
}