forked from hashicorp/terraform-provider-vsphere
/
resource_vsphere_vmfs_datastore.go
476 lines (426 loc) · 15 KB
/
resource_vsphere_vmfs_datastore.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
package vsphere
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/customattribute"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/datastore"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/folder"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/structure"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi"
"github.com/vmware/govmomi/vim25/types"
)
// State targets for the two resource.StateChangeConf loops used in
// resourceVSphereVmfsDatastoreDelete: the first set drives the retried
// removal call, the second set drives the wait-until-gone check.
const (
	retryDeletePending   = "retryDeletePending"
	retryDeleteCompleted = "retryDeleteCompleted"
	retryDeleteError     = "retryDeleteError"

	waitForDeletePending   = "waitForDeletePending"
	waitForDeleteCompleted = "waitForDeleteCompleted"
	waitForDeleteError     = "waitForDeleteError"
)

// formatVmfsDatastoreCreateRollbackErrorUpdate defines the verbose error for extending a
// disk on creation where rollback is not possible.
// Format verbs: %q = disk name, first %s = extend error, second %s = rollback error.
const formatVmfsDatastoreCreateRollbackErrorUpdate = `
WARNING: Dangling resource!
There was an error extending your datastore with disk: %q:
%s
Additionally, there was an error removing the created datastore:
%s
You will need to remove this datastore manually before trying again.
`
// resourceVSphereVmfsDatastore returns the schema and CRUD handlers for the
// vsphere_vmfs_datastore resource.
func resourceVSphereVmfsDatastore() *schema.Resource {
	// Pointer-to-struct element types may be elided inside the map literal.
	s := map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Description: "The name of the datastore.",
			Required:    true,
		},
		"host_system_id": {
			Type:        schema.TypeString,
			Description: "The managed object ID of the host to set up the datastore on.",
			ForceNew:    true,
			Required:    true,
		},
		"folder": {
			Type:          schema.TypeString,
			Description:   "The path to the datastore folder to put the datastore in.",
			Optional:      true,
			ConflictsWith: []string{"datastore_cluster_id"},
			StateFunc:     folder.NormalizePath,
		},
		"datastore_cluster_id": {
			Type:          schema.TypeString,
			Description:   "The managed object ID of the datastore cluster to place the datastore in.",
			Optional:      true,
			ConflictsWith: []string{"folder"},
		},
		"disks": {
			Type:        schema.TypeList,
			Description: "The disks to add to the datastore.",
			Required:    true,
			MinItems:    1,
			Elem:        &schema.Schema{Type: schema.TypeString},
		},
	}

	// Fold in the shared datastore summary attributes, then the tag and
	// custom attribute schemas.
	structure.MergeSchema(s, schemaDatastoreSummary())
	s[vSphereTagAttributeKey] = tagsSchema()
	s[customattribute.ConfigKey] = customattribute.ConfigSchema()

	return &schema.Resource{
		Create:        resourceVSphereVmfsDatastoreCreate,
		Read:          resourceVSphereVmfsDatastoreRead,
		Update:        resourceVSphereVmfsDatastoreUpdate,
		Delete:        resourceVSphereVmfsDatastoreDelete,
		CustomizeDiff: resourceVSphereVmfsDatastoreCustomizeDiff,
		Importer: &schema.ResourceImporter{
			State: resourceVSphereVmfsDatastoreImport,
		},
		Schema: s,
	}
}
// resourceVSphereVmfsDatastoreCreate creates the VMFS datastore on the
// configured host. The datastore is created from the first disk in the disks
// list and then extended with each remaining disk; if an extend fails, the
// partially-created datastore is destroyed (or the user is told to remove it
// manually when that rollback itself fails). After creation the datastore is
// moved to its folder/cluster and any tag and custom attribute diffs applied.
func resourceVSphereVmfsDatastoreCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*VSphereClient).vimClient
	// Load up the tags client, which will validate a proper vCenter before
	// attempting to proceed if we have tags defined.
	tagsClient, err := tagsClientIfDefined(d, meta)
	if err != nil {
		return err
	}
	// Verify a proper vCenter before proceeding if custom attributes are defined.
	attrsProcessor, err := customattribute.GetDiffProcessorIfAttributesDefined(client, d)
	if err != nil {
		return err
	}
	hsID := d.Get("host_system_id").(string)
	dss, err := hostDatastoreSystemFromHostSystemID(client, hsID)
	if err != nil {
		return fmt.Errorf("error loading host datastore system: %s", err)
	}
	// To ensure the datastore is fully created with all the disks that we want
	// to add to it, first we add the initial disk, then we expand the
	// datastore with the rest of the extents. The schema enforces MinItems: 1,
	// so indexing disks[0] is safe.
	disks := d.Get("disks").([]interface{})
	disk := disks[0].(string)
	spec, err := diskSpecForCreate(dss, disk)
	if err != nil {
		return err
	}
	spec.Vmfs.VolumeName = d.Get("name").(string)
	ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
	defer cancel()
	ds, err := dss.CreateVmfsDatastore(ctx, *spec)
	if err != nil {
		return fmt.Errorf("error creating datastore with disk %s: %s", disk, err)
	}
	// Add any remaining disks.
	for _, disk := range disks[1:] {
		var extendSpec *types.VmfsDatastoreExtendSpec
		extendSpec, err = diskSpecForExtend(dss, ds, disk.(string))
		if err != nil {
			// We have to destroy the created datastore here.
			if remErr := removeDatastore(dss, ds); remErr != nil {
				// We could not destroy the created datastore and there is now a
				// dangling resource. We need to instruct the user to remove the
				// datastore manually.
				return fmt.Errorf(formatVmfsDatastoreCreateRollbackErrorUpdate, disk, err, remErr)
			}
			return fmt.Errorf("error fetching datastore extend spec for disk %q: %s", disk, err)
		}
		// Use a per-iteration context and cancel it explicitly right after the
		// call. A defer here would not run until the function returns, keeping
		// one live context/timer alive per disk (defer-in-loop), and would also
		// shadow the outer cancel.
		extendCtx, extendCancel := context.WithTimeout(context.Background(), defaultAPITimeout)
		_, err = extendVmfsDatastore(extendCtx, dss, ds, *extendSpec)
		extendCancel()
		if err != nil {
			if remErr := removeDatastore(dss, ds); remErr != nil {
				// We could not destroy the created datastore and there is now a
				// dangling resource. We need to instruct the user to remove the
				// datastore manually.
				return fmt.Errorf(formatVmfsDatastoreCreateRollbackErrorUpdate, disk, err, remErr)
			}
			return fmt.Errorf("error extending datastore with disk %q: %s", disk, err)
		}
	}
	// Set the ID here now as most other issues here can be applied on an update,
	// so we don't need to roll back on failure.
	d.SetId(ds.Reference().Value)
	// Move the datastore to the correct folder first, if specified.
	f, err := resourceVSphereDatastoreApplyFolderOrStorageClusterPath(d, meta)
	if err != nil {
		return err
	}
	if !folder.PathIsEmpty(f) {
		if err := datastore.MoveToFolderRelativeHostSystemID(client, ds, hsID, f); err != nil {
			return fmt.Errorf("could not move datastore to folder %q: %s", f, err)
		}
	}
	// Apply any pending tags now.
	if tagsClient != nil {
		if err := processTagDiff(tagsClient, d, ds); err != nil {
			return err
		}
	}
	// Set custom attributes.
	if attrsProcessor != nil {
		if err := attrsProcessor.ProcessDiff(ds); err != nil {
			return err
		}
	}
	// Done
	return resourceVSphereVmfsDatastoreRead(d, meta)
}
// resourceVSphereVmfsDatastoreRead refreshes state from vSphere: the summary
// attributes, folder/cluster path, extent disk list, and (where supported)
// tags and custom attributes.
func resourceVSphereVmfsDatastoreRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*VSphereClient).vimClient

	ds, err := datastore.FromID(client, d.Id())
	if err != nil {
		return fmt.Errorf("cannot find datastore: %s", err)
	}
	props, err := datastore.Properties(ds)
	if err != nil {
		return fmt.Errorf("could not get properties for datastore: %s", err)
	}
	if err := flattenDatastoreSummary(d, &props.Summary); err != nil {
		return err
	}

	// Sync the folder/datastore cluster attribute.
	if err := resourceVSphereDatastoreReadFolderOrStorageClusterPath(d, ds); err != nil {
		return err
	}

	// Refresh the disk list from the VMFS extent data.
	extents := props.Info.(*types.VmfsDatastoreInfo).Vmfs.Extent
	disks := make([]string, 0, len(extents))
	for _, extent := range extents {
		disks = append(disks, extent.DiskName)
	}
	if err := d.Set("disks", disks); err != nil {
		return err
	}

	// Read tags if we have the ability to do so.
	if tagsClient, _ := meta.(*VSphereClient).TagsClient(); tagsClient != nil {
		if err := readTagsForResource(tagsClient, ds, d); err != nil {
			return err
		}
	}

	// Read custom attributes when the endpoint supports them.
	if customattribute.IsSupported(client) {
		customattribute.ReadFromResource(client, props.Entity(), d)
	}
	return nil
}
// resourceVSphereVmfsDatastoreUpdate applies in-place changes: rename,
// folder/datastore cluster move, tag and custom attribute diffs, and the
// addition of new extent disks. Removing a disk is not supported and vetoes
// the update with an error.
func resourceVSphereVmfsDatastoreUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*VSphereClient).vimClient
	// Load up the tags client, which will validate a proper vCenter before
	// attempting to proceed if we have tags defined.
	tagsClient, err := tagsClientIfDefined(d, meta)
	if err != nil {
		return err
	}
	// Verify a proper vCenter before proceeding if custom attributes are defined.
	attrsProcessor, err := customattribute.GetDiffProcessorIfAttributesDefined(client, d)
	if err != nil {
		return err
	}
	hsID := d.Get("host_system_id").(string)
	dss, err := hostDatastoreSystemFromHostSystemID(client, hsID)
	if err != nil {
		return fmt.Errorf("error loading host datastore system: %s", err)
	}
	id := d.Id()
	ds, err := datastore.FromID(client, id)
	if err != nil {
		return fmt.Errorf("cannot find datastore: %s", err)
	}
	// Rename this datastore if our name has drifted.
	if d.HasChange("name") {
		if err := viapi.RenameObject(client, ds.Reference(), d.Get("name").(string)); err != nil {
			return err
		}
	}
	// Update folder or datastore cluster if necessary.
	if d.HasChange("folder") || d.HasChange("datastore_cluster_id") {
		f, err := resourceVSphereDatastoreApplyFolderOrStorageClusterPath(d, meta)
		if err != nil {
			return err
		}
		if err := datastore.MoveToFolder(client, ds, f); err != nil {
			return fmt.Errorf("could not move datastore to folder %q: %s", f, err)
		}
	}
	// Apply any pending tags now.
	if tagsClient != nil {
		if err := processTagDiff(tagsClient, d, ds); err != nil {
			return err
		}
	}
	// Apply custom attribute updates.
	if attrsProcessor != nil {
		if err := attrsProcessor.ProcessDiff(ds); err != nil {
			return err
		}
	}
	// Veto this update if it means a disk was removed. Shrinking
	// datastores/removing extents is not supported. (Named oldDisks/newDisks
	// rather than old/new to avoid shadowing the builtin "new".)
	oldDisks, newDisks := d.GetChange("disks")
	for _, v1 := range oldDisks.([]interface{}) {
		var found bool
		for _, v2 := range newDisks.([]interface{}) {
			if v1.(string) == v2.(string) {
				found = true
			}
		}
		if !found {
			return fmt.Errorf("disk %s found in state but not config (removal of disks is not supported)", v1)
		}
	}
	// Now we basically reverse what we did above when we were checking for
	// removed disks, and add any new disks that have been added.
	for _, v1 := range newDisks.([]interface{}) {
		var found bool
		for _, v2 := range oldDisks.([]interface{}) {
			if v1.(string) == v2.(string) {
				found = true
			}
		}
		if !found {
			// Add the disk. Run the extend with a per-iteration context and
			// cancel it explicitly right after the call; a defer here would not
			// fire until the function returns, leaking one live context/timer
			// per added disk (defer-in-loop).
			spec, err := diskSpecForExtend(dss, ds, v1.(string))
			if err != nil {
				return err
			}
			ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
			_, extendErr := extendVmfsDatastore(ctx, dss, ds, *spec)
			cancel()
			if extendErr != nil {
				return extendErr
			}
		}
	}
	// Should be done with the update here.
	return resourceVSphereVmfsDatastoreRead(d, meta)
}
// resourceVSphereVmfsDatastoreDelete removes the datastore from the host,
// retrying briefly when it is still in use, and then blocks until vCenter no
// longer reports the object.
func resourceVSphereVmfsDatastoreDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*VSphereClient).vimClient
	hsID := d.Get("host_system_id").(string)
	dss, err := hostDatastoreSystemFromHostSystemID(client, hsID)
	if err != nil {
		return fmt.Errorf("error loading host datastore system: %s", err)
	}
	id := d.Id()
	ds, err := datastore.FromID(client, id)
	if err != nil {
		return fmt.Errorf("cannot find datastore: %s", err)
	}

	// Datastores mounted across multiple hosts can fail to remove if the
	// removal comes too quickly after creation (mostly a test-time race, but
	// worth guarding against in general). Retry the removal over a short
	// window, with a small minimum delay so the first honest attempt does not
	// spam the task log with errors.
	removeFunc := func() (interface{}, string, error) {
		if removeErr := removeDatastore(dss, ds); removeErr != nil {
			if viapi.IsResourceInUseError(removeErr) {
				// Still in use somewhere - retry.
				return struct{}{}, retryDeletePending, nil
			}
			// Some other error - give up.
			return struct{}{}, retryDeleteError, removeErr
		}
		return struct{}{}, retryDeleteCompleted, nil
	}
	removeWaiter := &resource.StateChangeConf{
		Pending:    []string{retryDeletePending},
		Target:     []string{retryDeleteCompleted},
		Refresh:    removeFunc,
		Timeout:    30 * time.Second,
		MinTimeout: 2 * time.Second,
		Delay:      2 * time.Second,
	}
	if _, err = removeWaiter.WaitForState(); err != nil {
		return fmt.Errorf("could not delete datastore: %s", err)
	}

	// vCenter can lag a bit before the datastore actually disappears; that
	// delay has caused issues in tests and could bite elsewhere too, so wait
	// until lookup by ID reports the managed object as gone.
	goneFunc := func() (interface{}, string, error) {
		if _, lookupErr := datastore.FromID(client, id); lookupErr != nil {
			if viapi.IsManagedObjectNotFoundError(lookupErr) {
				// Fully removed.
				return struct{}{}, waitForDeleteCompleted, nil
			}
			// Some other error - give up.
			return struct{}{}, waitForDeleteError, lookupErr
		}
		return struct{}{}, waitForDeletePending, nil
	}
	goneWaiter := &resource.StateChangeConf{
		Pending:        []string{waitForDeletePending},
		Target:         []string{waitForDeleteCompleted},
		Refresh:        goneFunc,
		Timeout:        defaultAPITimeout,
		MinTimeout:     2 * time.Second,
		Delay:          1 * time.Second,
		NotFoundChecks: 35,
	}
	if _, err = goneWaiter.WaitForState(); err != nil {
		return fmt.Errorf("error waiting for datastore to delete: %s", err.Error())
	}
	return nil
}
// resourceVSphereVmfsDatastoreCustomizeDiff validates the planned disk list,
// rejecting nil entries, empty names, and duplicate names.
func resourceVSphereVmfsDatastoreCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
	seen := make(map[string]struct{})
	for i, raw := range d.Get("disks").([]interface{}) {
		if raw == nil {
			return fmt.Errorf("disk.%d: empty entry", i)
		}
		name := raw.(string)
		if name == "" {
			return fmt.Errorf("disk.%d: empty entry", i)
		}
		if _, dup := seen[name]; dup {
			return fmt.Errorf("disk.%d: duplicate name %q", i, name)
		}
		seen[name] = struct{}{}
	}
	return nil
}
// resourceVSphereVmfsDatastoreImport imports a datastore from an ID in the
// form DATASTOREID:HOSTID. We load the datastore and check to make sure 1) it
// exists, 2) it's a VMFS datastore, and 3) it is mounted on the supplied host.
// If so, we are good to go (the rest of the state is handled by read on
// refresh).
func resourceVSphereVmfsDatastoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	ids := strings.SplitN(d.Id(), ":", 2)
	if len(ids) != 2 {
		return nil, errors.New("please supply the ID in the following format: DATASTOREID:HOSTID")
	}
	id := ids[0]
	hsID := ids[1]
	client := meta.(*VSphereClient).vimClient
	ds, err := datastore.FromID(client, id)
	if err != nil {
		return nil, fmt.Errorf("cannot find datastore: %s", err)
	}
	props, err := datastore.Properties(ds)
	if err != nil {
		return nil, fmt.Errorf("could not get properties for datastore: %s", err)
	}
	t := types.HostFileSystemVolumeFileSystemType(props.Summary.Type)
	if t != types.HostFileSystemVolumeFileSystemTypeVMFS {
		return nil, fmt.Errorf("datastore ID %q is not a VMFS datastore", id)
	}
	// Make sure the supplied host actually has this datastore mounted. Stop
	// scanning as soon as a match is found.
	var found bool
	for _, mount := range props.Host {
		if mount.Key.Value == hsID {
			found = true
			break
		}
	}
	if !found {
		return nil, fmt.Errorf("configured host_system_id %q not found as a mounted host on datastore", hsID)
	}
	d.SetId(id)
	// Surface the Set error instead of silently dropping it.
	if err := d.Set("host_system_id", hsID); err != nil {
		return nil, err
	}
	return []*schema.ResourceData{d}, nil
}