Import VM with attached disk fails on plan #786

Open
pryorda opened this issue Jun 10, 2019 · 1 comment

@pryorda commented Jun 10, 2019

Terraform Version

0.11.13

vSphere Provider Version

Master (with local patches as a workaround)

Affected Resource(s)

  • vsphere_virtual_machine
  • vsphere_virtual_disk

Terraform Configuration Files

# Providers
# VSphere
provider "vsphere" {}

provider "aws" {}

locals {
  host_basename = "${coalesce(var.hostname_override, "${var.environment}-${var.application}-${var.component}")}"
  role          = "${var.application}-${var.component}"
}

data "vsphere_datacenter" "datacenter" {
  name = "${var.vsphere_datacenter}"
}

data "vsphere_compute_cluster" "cluster" {
  name          = "${var.vsphere_cluster}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_datastore" "datastore" {
  name          = "${var.vsphere_datastore}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_resource_pool" "pool" {
  name          = "${var.vsphere_cluster}/Resources"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_network" "network" {
  name          = "${var.vsphere_network_label}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_virtual_machine" "template" {
  name          = "${var.vsphere_template}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

# DNS for instances
module "instance-dns" {
  source = "../instance_dns"

  providers = {
    aws = "aws"
  }

  instance_count    = "${var.instance_count}"
  service_provider  = "op"
  ttl               = "60"
  region            = "${lower(var.region)}"
  create_regionless = false
  hostname          = "${local.host_basename}"
  ips               = ["${vsphere_virtual_machine.instance.*.default_ip_address}"]
}

# Instance Resource
resource "vsphere_virtual_machine" "instance" {
  depends_on = ["vsphere_virtual_disk.data_disk"]

  count            = "${var.instance_count}"
  name             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}"
  resource_pool_id = "${data.vsphere_resource_pool.pool.id}"
  datastore_id     = "${data.vsphere_datastore.datastore.id}"
  guest_id         = "${data.vsphere_virtual_machine.template.guest_id}"
  scsi_type        = "${data.vsphere_virtual_machine.template.scsi_type}"

  folder = "${var.vsphere_folder_path}"

  num_cpus           = "${var.vsphere_vcpu}"
  memory             = "${var.vsphere_memory}"
  memory_reservation = "${var.vsphere_reserved_memory}"

  enable_disk_uuid           = true
  wait_for_guest_net_timeout = 180

  network_interface {
    network_id     = "${data.vsphere_network.network.id}"
    adapter_type   = "${data.vsphere_virtual_machine.template.network_interface_types[0]}"
    mac_address    = "${var.static_macs != "" ? element(split(",", var.static_macs), count.index) : ""}"
    use_static_mac = "${var.static_macs == "" ? false : true}"
  }

  // This doesn't actually work; it is a workaround for the customize spec.
  cdrom {
    datastore_id = "${data.vsphere_datastore.datastore.id}"
    path         = "ISOs/os-livecd.iso"
  }

  disk {
    path             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.vmdk"
    label            = "disk0"
    size             = "40"
    eagerly_scrub    = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}"
    thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}"
  }

  disk {
    path         = "${element(vsphere_virtual_disk.data_disk.*.vmdk_path, count.index)}"
    label        = "disk1"
    attach       = true
    unit_number  = 1
    datastore_id = "${data.vsphere_datastore.datastore.id}"
  }

  clone {
    template_uuid = "${data.vsphere_virtual_machine.template.id}"

    customize {
      dns_suffix_list = ["${lower(var.region)}.${lower(var.service_provider)}.domain.local", "${split(",",var.vsphere_network_domain_search)}"]

      linux_options {
        host_name = "${lower(local.host_basename)}${count.index + 1}"
        domain    = "${lower(var.region)}.${lower(var.service_provider)}.parchment.com"
        time_zone = "${var.vsphere_cluster_timezone}"
      }

      network_interface {}
    }
  }

  lifecycle {
    // Adding disk until https://github.com/terraform-providers/terraform-provider-vsphere/issues/227 is fixed
    ignore_changes = ["network_interface", "dns_suffixes", "disk", "clone", "enable_disk_uuid"]
  }

  provisioner "local-exec" {
    when = "destroy"

    interpreter = ["bash", "-c"]

    command = <<EOT
DATE=$(date +%Y/%m/%d/%H/%M)
aws logs create-log-stream --region ${var.aws_region} --log-group-name vsphere_deprovision --log-stream-name $DATE/${self.uuid}
read -d '' EVENT <<EOJ
  [{
    "timestamp": $(date +%s)000,
    "message": "{ \\\"source\\\": \\\"terraform.deprovision\\\", \\\"detail\\\": { \\\"state\\\": \\\"terminated\\\", \\\"uuid\\\": \\\"${self.uuid}\\\", \\\"fqdn\\\": \\\"${self.name}.op.parchment.com\\\" }, \\\"detail-type\\\": \\\"VSphere Instance Deprovision via Terraform\\\" }"
  }]
EOJ
aws logs put-log-events --region ${var.aws_region} --log-group-name vsphere_deprovision --log-stream-name $DATE/${self.uuid} --log-events "$EVENT"
EOT
  }
}

resource "vsphere_virtual_disk" "data_disk" {
  count      = "${var.instance_count}"
  size       = "${var.data_disk_size}"
  vmdk_path  = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
  datacenter = "${var.vsphere_datacenter}"
  datastore  = "${var.vsphere_datastore}"
  type       = "thin"

  lifecycle {
    prevent_destroy = false
  }
}

# Create Virtual Machine Anti-Affinity Rules
resource "vsphere_compute_cluster_vm_anti_affinity_rule" "cluster_vm_anti_affinity_rule" {
  count               = "${var.instance_count > 0 ? 1 : 0 }"
  name                = "${lower(local.host_basename)}.${lower(var.region)}.${lower(var.service_provider)}"
  compute_cluster_id  = "${data.vsphere_compute_cluster.cluster.id}"
  virtual_machine_ids = ["${vsphere_virtual_machine.instance.*.id}"]
}

# Fun hack explained here https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652
output "instance_ids" {
  value = ["${concat(vsphere_virtual_machine.instance.*.uuid, list(""))}"]
}

output "instances_dns" {
  value = ["${formatlist("%s.%s", concat(vsphere_virtual_machine.instance.*.name, list("")), "op.parchment.com")}"]
}

output "instance_private_ips" {
  value = ["${concat(vsphere_virtual_machine.instance.*.default_ip_address, list(""))}"]
}

Debug Output

I can't attach full debug output here, but I will add any lines that are requested.

Panic Output

NA

Expected Behavior

The attached disk should be imported correctly, and its size should be read from the attached virtual disk.

Actual Behavior

Error: Error running plan: 1 error occurred:
        * module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance: 1 error occurred:
        * module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance[0]: disk.1: virtual disk "disk1": virtual disks cannot be shrunk (old: 150 new: 0) 

Steps to Reproduce

  1. terraform import vm path (a rough example of the commands is sketched below)
  2. terraform plan
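
For reference, the commands looked roughly like the ones below. The resource address is the one shown in the plan error further down; the inventory path and VM name are only illustrative.

# Import an existing VM (which already has the data disk attached in vSphere).
# The /<datacenter>/vm/<folder>/<name> path shown here is an example, not the real one.
terraform import 'module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance[0]' '/dc-01/vm/example-folder/example-vm1.region'

# Planning immediately after the import is what produces the "cannot be shrunk" error.
terraform plan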

Workaround

I've managed to correct the issue by manually mangling the state (setting disk.1.attach: true) and applying the diff below, which is probably not the right way to do it.
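
Roughly, the manual state edit looked like the following. The exact resource key and the jq filter are illustrative (I edited the pulled JSON by hand); the important part is flipping the flatmapped disk.1.attach attribute to "true" and bumping the serial before pushing.

# Pull the current state to a local file.
terraform state pull > state.json

# Set disk.1.attach to "true" on the imported instance and bump the serial so the
# push is accepted. The resource key shown is an assumption about the state layout
# for this module/count combination.
jq '.serial += 1
    | (.modules[]
       | select(.resources["vsphere_virtual_machine.instance.0"] != null)
       | .resources["vsphere_virtual_machine.instance.0"].primary.attributes["disk.1.attach"])
      = "true"' state.json > state.edited.json

# Push the edited state back.
terraform state push state.edited.json

On top of the state edit, I applied the following patch to the provider: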

diff --git a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
index 4197a50..4a326e3 100644
--- a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
+++ b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -612,6 +612,7 @@ func DiskDiffOperation(d *schema.ResourceDiff, c *govmomi.Client) error {
        nds := n.([]interface{})
 
        normalized := make([]interface{}, len(ods))
+       log.Printf("What is nds: %#v", nds)
 nextNew:
        for _, ne := range nds {
                nm := ne.(map[string]interface{})
@@ -628,6 +629,8 @@ nextNew:
                        // We extrapolate using the label as a "primary key" of sorts.
                        if nname == oname {
                                r := NewDiskSubresource(c, d, nm, om, oi)
+                               log.Printf("Super messed up NM: %#v", nm)
+                               log.Printf("Super messed up OM: %#v", om)
                                if err := r.DiffExisting(); err != nil {
                                        return fmt.Errorf("%s: %s", r.Addr(), err)
                                }
@@ -1397,10 +1400,11 @@ func (r *DiskSubresource) DiffExisting() error {
        // we might want to change the name of this method, but we want to check this
        // here as CustomizeDiff is meant for vetoing.
        osize, nsize := r.GetChange("size")
-       if osize.(int) > nsize.(int) {
-               return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
+       if !r.Get("attach").(bool) {
+               if osize.(int) > nsize.(int) {
+                       return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
+               }
        }
-
        // Ensure that there is no change in either eagerly_scrub or thin_provisioned
        // - these values cannot be changed once set.
        if _, err = r.GetWithVeto("eagerly_scrub"); err != nil { 

@pryorda (Author) commented Jun 10, 2019

@vancluever You might be able to tell me an easy way to have it look up the virtualdisk resource instead of using the local disk attributes.
