/
tfcmd.go
242 lines (208 loc) · 7.07 KB
/
tfcmd.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
package terraform
import (
"fmt"
be_init "github.com/hashicorp/terraform/backend/init"
"github.com/hashicorp/terraform/command"
"github.com/kyma-incubator/hydroform/provision/types"
hashiCli "github.com/mitchellh/cli"
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"strings"
)
// tfInit runs the 'terraform init' command with the specified options and config in the given working directory.
// Always run this before creating any files in the given dir, modules can only be downloaded into empty dirs.
// If the given dir is not empty, no modules will be downloaded and init will assume there is a valid module in dir.
func tfInit(ops Options, p types.ProviderType, cfg map[string]interface{}, dir string) error {
	// All terraform backends must be registered before running any command.
	be_init.Init(ops.Services)

	if p == types.Gardener {
		// On Gardener we manage the provider download ourselves.
		// Verification will fail but we know it is fine, so skip it.
		os.Setenv(command.ProviderSkipVerifyEnvVar, "1")
	}

	initCmd := &command.InitCommand{
		Meta: ops.Meta,
	}
	if code := initCmd.Run(initArgs(p, cfg, dir)); code != 0 {
		return checkUIErrors(ops.Ui)
	}
	return nil
}
// initArgs generates the flag list for the terraform init command based on the operator configuration.
//
// If the cluster directory is empty and the provider has a terraform module
// available, a -from-module flag is added so init downloads the module into it.
// The cluster directory itself is always appended as the final positional argument.
func initArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {
	args := make([]string, 0)

	empty, err := isEmptyDir(clusterDir)
	if err != nil {
		fmt.Printf("Could not verify if the cluster directory is empty: %s.\nAttempting initialisation without downloading modules.", err)
	}
	// Only download module if the directory is empty, otherwise we might
	// already have a valid module config from a previous persistent operation.
	if empty {
		// TODO remove this condition when fully migrated to modules
		if m := tfMod(p); m != "" {
			args = append(args, fmt.Sprintf("-from-module=%s", m))
		}
	}

	// On Windows, strip the extended-length path prefix '\\?\' which terraform
	// does not handle. Check for the prefix first: blindly slicing off the
	// first 4 characters would corrupt paths that do not carry it.
	if runtime.GOOS == "windows" && strings.HasPrefix(clusterDir, `\\?\`) {
		clusterDir = clusterDir[4:]
	}

	args = append(args, clusterDir)
	return args
}
// tfMod returns the terraform module URL for the given provider, or the empty
// string if no module is available for it.
func tfMod(p types.ProviderType) string {
	// Azure is currently the only provider with a module URL; AWS, GCP and
	// Gardener fall through to the empty string until they are migrated.
	if p == types.Azure {
		return azureMod
	}
	return ""
}
// tfApply runs a smart 'terraform apply' command with the specified options
// and config in the given working directory.
//
// The smart logic is as follows:
//   - Apply is attempted regularly.
//   - If failed with error "already exists" it means that the cluster exists but
//     we do not have its state locally, so import the existing cluster and
//     refresh the local state.
//   - If failed with error "not found" => probably state is corrupt => delete
//     the state and start over with apply.
func tfApply(ops Options, p types.ProviderType, cfg map[string]interface{}, dir string) error {
	a := &command.ApplyCommand{
		Meta: ops.Meta,
	}
	e := a.Run(applyArgs(p, cfg, dir))
	if e != 0 {
		errList := checkUIErrors(ops.Ui)
		// The apply exit code is non-zero but the UI recorded no errors
		// (or the UI is not a HydroUI). Without this guard the calls to
		// errList.Error() below would panic on a nil error.
		if errList == nil {
			return errors.New("terraform apply failed with unknown error")
		}
		// if cluster already exists import it and refresh the state
		if strings.Contains(strings.ToLower(errList.Error()), "already exists") {
			i := &command.ImportCommand{
				Meta: ops.Meta,
			}
			if e := i.Run(importArgs(p, cfg, dir)); e != 0 {
				return checkUIErrors(ops.Ui)
			}
			r := &command.RefreshCommand{
				Meta: ops.Meta,
			}
			if e := r.Run(refreshArgs(p, cfg, dir)); e != 0 {
				return checkUIErrors(ops.Ui)
			}
			return nil
		}
		// if cluster was not found, cluster got deleted on the remote or state is wrong, delete state and start over
		if strings.Contains(errList.Error(), "not found") {
			// delete the corrupt state file
			stateFile := filepath.Join(dir, tfStateFile)
			if err := os.Remove(stateFile); err != nil {
				return err
			}
			// Try applying again. The recursion is bounded: if the retry fails
			// with "not found" again, removing the already-deleted state file
			// errors out and the retry returns instead of recursing further.
			if err := tfApply(ops, p, cfg, dir); err != nil {
				return errors.Wrap(err, errList.Error())
			}
			return nil
		}
		return errList
	}
	return nil
}
// tfDestroy runs the 'terraform destroy' command with the specified options and config in the given working directory.
func tfDestroy(ops Options, p types.ProviderType, cfg map[string]interface{}, dir string) error {
	// terraform implements destroy as an apply with the Destroy flag set.
	destroyCmd := &command.ApplyCommand{
		Meta:    ops.Meta,
		Destroy: true,
	}
	if code := destroyCmd.Run(applyArgs(p, cfg, dir)); code != 0 {
		return checkUIErrors(ops.Ui)
	}
	return nil
}
// applyArgs generates the flag list for the terraform apply command based on
// the operator configuration: the state and vars files inside the cluster
// directory, auto-approval, and the cluster directory as positional argument.
func applyArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {
	return []string{
		"-state=" + filepath.Join(clusterDir, tfStateFile),
		"-var-file=" + filepath.Join(clusterDir, tfVarsFile),
		"-auto-approve",
		clusterDir,
	}
}
// importArgs generates the flag list for the terraform import command based
// on the operator configuration. The state file is used both as input and
// output, and the provider-specific cluster resource address and cluster ID
// are appended as positional arguments.
func importArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {
	state := filepath.Join(clusterDir, tfStateFile)
	return []string{
		"-state=" + state,
		"-state-out=" + state,
		"-var-file=" + filepath.Join(clusterDir, tfVarsFile),
		"-config=" + clusterDir,
		clusterResource(p), // cluster resource
		clusterID(p, cfg),  // cluster ID
	}
}
// clusterResource returns the cluster resource type defined in the terraform
// module for the given provider, or the empty string for unknown providers.
func clusterResource(p types.ProviderType) string {
	resources := map[types.ProviderType]string{
		types.GCP:      "google_container_cluster.gke_cluster",
		types.Azure:    "azurerm_kubernetes_cluster.azure_cluster",
		types.Gardener: "gardener_shoot.gardener_cluster",
		types.AWS:      "not supported",
	}
	// Unknown providers yield the map's zero value, the empty string.
	return resources[p]
}
// clusterID generates a cluster ID based on the given config.
// Each provider has a different way of identifying clusters.
func clusterID(p types.ProviderType, cfg map[string]interface{}) string {
	if p == types.AWS {
		return "not supported"
	}
	name := cfg["cluster_name"]
	switch p {
	case types.GCP:
		// GCP identifies clusters by project, location and name.
		return fmt.Sprintf("%s/%s/%s", cfg["project"], cfg["location"], name)
	case types.Gardener:
		// Gardener identifies shoots by namespace and name.
		return fmt.Sprintf("%s/%s", cfg["namespace"], name)
	}
	return ""
}
// refreshArgs generates the flag list for the terraform refresh command based
// on the operator configuration: the state and vars files inside the cluster
// directory, followed by the cluster directory as positional argument.
func refreshArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {
	return []string{
		"-state=" + filepath.Join(clusterDir, tfStateFile),
		"-var-file=" + filepath.Join(clusterDir, tfVarsFile),
		clusterDir,
	}
}
// checkUIErrors collects any errors recorded by the terraform UI and merges
// them into a single error. It returns nil if no errors were recorded or if
// the UI is not a *HydroUI (only HydroUI exposes recorded errors).
func checkUIErrors(ui hashiCli.Ui) error {
	h, ok := ui.(*HydroUI)
	if !ok {
		return nil
	}
	var errsum strings.Builder
	for _, e := range h.Errors() {
		// strings.Builder.WriteString is documented to always return a nil
		// error, so checking it is dead code; the previous wrap ("could not
		// fetch errors from terraform") could never trigger.
		errsum.WriteString(e.Error())
	}
	if errsum.Len() == 0 {
		return nil
	}
	return errors.New(errsum.String())
}