Import VM with attached disk fails on plan #786

Closed
pryorda opened this issue Jun 10, 2019 · 4 comments
Labels
bug (Type: Bug) · stale (Status: Stale)

Comments


pryorda commented Jun 10, 2019

Terraform Version

0.11.13

vSphere Provider Version

Master (with local patches as a workaround)

Affected Resource(s)

  • vsphere_virtual_machine
  • vsphere_virtual_disk

Terraform Configuration Files

# Providers
# VSphere
provider "vsphere" {}

provider "aws" {}

locals {
  host_basename = "${coalesce(var.hostname_override, "${var.environment}-${var.application}-${var.component}")}"
  role          = "${var.application}-${var.component}"
}

data "vsphere_datacenter" "datacenter" {
  name = "${var.vsphere_datacenter}"
}

data "vsphere_compute_cluster" "cluster" {
  name          = "${var.vsphere_cluster}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_datastore" "datastore" {
  name          = "${var.vsphere_datastore}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_resource_pool" "pool" {
  name          = "${var.vsphere_cluster}/Resources"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_network" "network" {
  name          = "${var.vsphere_network_label}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

data "vsphere_virtual_machine" "template" {
  name          = "${var.vsphere_template}"
  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
}

# DNS for instances
module "instance-dns" {
  source = "../instance_dns"

  providers = {
    aws = "aws"
  }

  instance_count    = "${var.instance_count}"
  service_provider  = "op"
  ttl               = "60"
  region            = "${lower(var.region)}"
  create_regionless = false
  hostname          = "${local.host_basename}"
  ips               = ["${vsphere_virtual_machine.instance.*.default_ip_address}"]
}

# Instance Resource
resource "vsphere_virtual_machine" "instance" {
  depends_on = ["vsphere_virtual_disk.data_disk"]

  count            = "${var.instance_count}"
  name             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}"
  resource_pool_id = "${data.vsphere_resource_pool.pool.id}"
  datastore_id     = "${data.vsphere_datastore.datastore.id}"
  guest_id         = "${data.vsphere_virtual_machine.template.guest_id}"
  scsi_type        = "${data.vsphere_virtual_machine.template.scsi_type}"

  folder = "${var.vsphere_folder_path}"

  num_cpus           = "${var.vsphere_vcpu}"
  memory             = "${var.vsphere_memory}"
  memory_reservation = "${var.vsphere_reserved_memory}"

  enable_disk_uuid           = true
  wait_for_guest_net_timeout = 180

  network_interface {
    network_id     = "${data.vsphere_network.network.id}"
    adapter_type   = "${data.vsphere_virtual_machine.template.network_interface_types[0]}"
    mac_address    = "${var.static_macs != "" ? element(split(",", var.static_macs), count.index) : ""}"
    use_static_mac = "${var.static_macs == "" ? false : true}"
  }

  // This doesn't actually work and is a workaround for the customize spec.
  cdrom {
    datastore_id = "${data.vsphere_datastore.datastore.id}"
    path         = "ISOs/os-livecd.iso"
  }

  disk {
    path             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.vmdk"
    label            = "disk0"
    size             = "40"
    eagerly_scrub    = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}"
    thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}"
  }

  disk {
    path         = "${element(vsphere_virtual_disk.data_disk.*.vmdk_path, count.index)}"
    label        = "disk1"
    attach       = true
    unit_number  = 1
    datastore_id = "${data.vsphere_datastore.datastore.id}"
  }

  clone {
    template_uuid = "${data.vsphere_virtual_machine.template.id}"

    customize {
      dns_suffix_list = ["${lower(var.region)}.${lower(var.service_provider)}.domain.local", "${split(",",var.vsphere_network_domain_search)}"]

      linux_options {
        host_name = "${lower(local.host_basename)}${count.index + 1}"
        domain    = "${lower(var.region)}.${lower(var.service_provider)}.parchment.com"
        time_zone = "${var.vsphere_cluster_timezone}"
      }

      network_interface {}
    }
  }

  lifecycle {
    // Adding disk until https://github.com/terraform-providers/terraform-provider-vsphere/issues/227 is fixed
    ignore_changes = ["network_interface", "dns_suffixes", "disk", "clone", "enable_disk_uuid"]
  }

  provisioner "local-exec" {
    when = "destroy"

    interpreter = ["bash", "-c"]

    command = <<EOT
DATE=$(date +%Y/%m/%d/%H/%M)
aws logs create-log-stream --region ${var.aws_region} --log-group-name vsphere_deprovision --log-stream-name $DATE/${self.uuid}
read -d '' EVENT <<EOJ
  [{
    "timestamp": $(date +%s)000,
    "message": "{ \\\"source\\\": \\\"terraform.deprovision\\\", \\\"detail\\\": { \\\"state\\\": \\\"terminated\\\", \\\"uuid\\\": \\\"${self.uuid}\\\", \\\"fqdn\\\": \\\"${self.name}.op.parchment.com\\\" }, \\\"detail-type\\\": \\\"VSphere Instance Deprovision via Terraform\\\" }"
  }]
EOJ
aws logs put-log-events --region ${var.aws_region} --log-group-name vsphere_deprovision --log-stream-name $DATE/${self.uuid} --log-events "$EVENT"
EOT
  }
}

resource "vsphere_virtual_disk" "data_disk" {
  count      = "${var.instance_count}"
  size       = "${var.data_disk_size}"
  vmdk_path  = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
  datacenter = "${var.vsphere_datacenter}"
  datastore  = "${var.vsphere_datastore}"
  type       = "thin"

  lifecycle {
    prevent_destroy = false
  }
}

# Create Virtual Machine Anti-Affinity Rules
resource "vsphere_compute_cluster_vm_anti_affinity_rule" "cluster_vm_anti_affinity_rule" {
  count               = "${var.instance_count > 0 ? 1 : 0 }"
  name                = "${lower(local.host_basename)}.${lower(var.region)}.${lower(var.service_provider)}"
  compute_cluster_id  = "${data.vsphere_compute_cluster.cluster.id}"
  virtual_machine_ids = ["${vsphere_virtual_machine.instance.*.id}"]
}

# Fun hack explained here https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652
output "instance_ids" {
  value = ["${concat(vsphere_virtual_machine.instance.*.uuid, list(""))}"]
}

output "instances_dns" {
  value = ["${formatlist("%s.%s", concat(vsphere_virtual_machine.instance.*.name, list("")), "op.parchment.com")}"]
}

output "instance_private_ips" {
  value = ["${concat(vsphere_virtual_machine.instance.*.default_ip_address, list(""))}"]
}

Debug Output

I can't attach full debug output, but I will add any lines that are requested.

Panic Output

N/A

Expected Behavior

The plan should import the attached disk correctly and use the size from the attached vsphere_virtual_disk.

Actual Behavior

Error: Error running plan: 1 error occurred:
        * module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance: 1 error occurred:
        * module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance[0]: disk.1: virtual disk "disk1": virtual disks cannot be shrunk (old: 150 new: 0) 
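
For reference, one way to check the size the provider recorded for the attached disk after import (a rough sketch; the resource address below is a placeholder, terraform state list shows the real one):

# Find the imported VM's address, then dump its flattened disk attributes.
terraform state list | grep vsphere_virtual_machine
terraform state show 'module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance[0]' | grep 'disk\.1\.'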

Steps to Reproduce

  1. terraform import vm path (see the command sketch below)
  2. terraform plan
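
Concretely, something like the following; the module address and inventory path are placeholders for this configuration:

# Import the existing VM by its vSphere inventory path (placeholder path).
terraform import 'module.vsphere_linux_thanos_store.vsphere_virtual_machine.instance[0]' '/MyDatacenter/vm/MyFolder/myhost1.region'
# Plan immediately after the import; this is where the shrink error appears.
terraform plan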

Work Around

I've managed to correct the issue by manually mangling the state, setting disk.1.attach: true on the imported VM, and applying the diffs below, which is probably not the right way to do it.
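
For completeness, one way to do that state edit is with terraform state pull/push. This is only a sketch; Terraform 0.11 stores resource attributes as a flat string map:

# Pull the current state to a local file for editing.
terraform state pull > state.json
# In state.json, under the imported VM's "attributes" map, set:
#   "disk.1.attach": "true"
# Also increment the top-level "serial" value, or push may refuse the edited file.
terraform state push state.json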

diff --git a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
index 4197a50..4a326e3 100644
--- a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
+++ b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -612,6 +612,7 @@ func DiskDiffOperation(d *schema.ResourceDiff, c *govmomi.Client) error {
        nds := n.([]interface{})
 
        normalized := make([]interface{}, len(ods))
+       log.Printf("What is nds: %#v", nds)
 nextNew:
        for _, ne := range nds {
                nm := ne.(map[string]interface{})
@@ -628,6 +629,8 @@ nextNew:
                        // We extrapolate using the label as a "primary key" of sorts.
                        if nname == oname {
                                r := NewDiskSubresource(c, d, nm, om, oi)
+                               log.Printf("Super messed up NM: %#v", nm)
+                               log.Printf("Super messed up OM: %#v", om)
                                if err := r.DiffExisting(); err != nil {
                                        return fmt.Errorf("%s: %s", r.Addr(), err)
                                }
@@ -1397,10 +1400,11 @@ func (r *DiskSubresource) DiffExisting() error {
        // we might want to change the name of this method, but we want to check this
        // here as CustomizeDiff is meant for vetoing.
        osize, nsize := r.GetChange("size")
-       if osize.(int) > nsize.(int) {
-               return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
+       if !r.Get("attach").(bool) {
+               if osize.(int) > nsize.(int) {
+                       return fmt.Errorf("virtual disk %q: virtual disks cannot be shrunk (old: %d new: %d)", name, osize.(int), nsize.(int))
+               }
        }
-
        // Ensure that there is no change in either eagerly_scrub or thin_provisioned
        // - these values cannot be changed once set.
        if _, err = r.GetWithVeto("eagerly_scrub"); err != nil { 

pryorda commented Jun 10, 2019

@vancluever You might be able to tell me an easy way to have it look up the vsphere_virtual_disk resource instead of using the local disk attributes.

bill-rich added the bug (Type: Bug) label on Jun 21, 2019
hashibot commented

This issue has been open 180 days with no activity. If this issue is reproducible with the latest version of the provider and with Terraform 0.12, please comment. Otherwise this issue will be closed in 30 days.

hashibot added the stale (Status: Stale) label on Apr 22, 2020

ghost commented Jun 21, 2020

I'm going to lock this issue because it has been closed for 30 days ⏳. This helps our maintainers find and focus on the active issues.

If you feel this issue should be reopened, we encourage creating a new issue linking back to this one for added context. If you feel I made an error 🤖 🙉 , please reach out to my human friends 👉 hashibot-feedback@hashicorp.com. Thanks!

hashicorp locked and limited conversation to collaborators on Jun 22, 2020