xenapi: Refactor snapshots during resize
Currently, we use VM.snapshot for resize, which fails if the VM has an
attached volume that does not support snapshots. This change uses
VDI.snapshot instead, for all VDIs that are not attached by nova.

This also requires the xenapi driver to detach and reattach volumes
during migrations and during the revert of a migration.

Fixes Bug #1028092

Change-Id: I3e2973747135a9c33de194e38537620c397bb87e
(cherry picked from commit 3595275)
Renuka Apte authored and Chuck Short committed Oct 24, 2012
1 parent d1e462d commit d5888f1
Showing 6 changed files with 119 additions and 31 deletions.
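
As described in the commit message, the snapshot path now walks the VM's VBDs and snapshots each VDI individually, skipping any VBD whose other_config carries an 'osvol' key (i.e. a nova/cinder volume). A minimal sketch of that idea against a raw XenAPI session, with an illustrative helper name (the real helper added by this commit is _vdi_snapshot_vm_base in vm_utils.py below):

    def snapshot_non_volume_vdis(session, vm_ref):
        # Snapshot every VDI attached to the VM except nova/cinder volumes,
        # whose VBDs carry an 'osvol' marker in other_config.
        snapshot_refs = []
        for vbd_ref in session.call_xenapi("VM.get_VBDs", vm_ref):
            other_config = session.call_xenapi("VBD.get_other_config", vbd_ref)
            if 'osvol' in other_config:
                continue  # volume-backed disk; its SR may not support snapshots
            vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
            snapshot_refs.append(session.call_xenapi("VDI.snapshot", vdi_ref, {}))
        return snapshot_refs
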
44 changes: 39 additions & 5 deletions nova/compute/manager.py
@@ -790,8 +790,10 @@ def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
if str(bdm['volume_id']) == str(volume_id):
return bdm

def _get_instance_volume_block_device_info(self, context, instance_uuid):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
def _get_instance_volume_block_device_info(self, context, instance_uuid,
bdms=None):
if bdms is None:
bdms = self._get_instance_volume_bdms(context, instance_uuid)
block_device_mapping = []
for bdm in bdms:
try:
@@ -850,7 +852,7 @@ def _shutdown_instance(self, context, instance):
# NOTE(vish) get bdms before destroying the instance
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
context, instance['uuid'], bdms=bdms)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
for bdm in bdms:
@@ -1441,6 +1443,14 @@ def revert_resize(self, context, instance, migration_id,

self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
# Terminate volume connections.
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.terminate_connection(context, volume,
connector)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration_ref['id'], migration_ref['source_compute'],
reservations)
@@ -1468,8 +1478,15 @@ def finish_revert_resize(self, context, migration_id, instance,
old_instance_type = migration_ref['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)

bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)

self.driver.finish_revert_migration(instance,
self._legacy_nw_info(network_info),
@@ -1593,6 +1610,15 @@ def resize_instance(self, context, instance,
instance_type_ref, self._legacy_nw_info(network_info),
block_device_info)

# Terminate volume connections.
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.terminate_connection(context, volume,
connector)

self.db.migration_update(context,
migration_id,
{'status': 'post-migrating'})
@@ -1640,8 +1666,16 @@ def _finish_resize(self, context, instance, migration_ref, disk_info,
context, instance, "finish_resize.start",
network_info=network_info)

bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
context, instance['uuid'], bdms=bdms)

if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)

self.driver.finish_migration(context, migration_ref, instance,
disk_info,
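
The pattern repeated in the hunks above is symmetric: terminate the volume connections on the source host once the driver is done with the disks, and initialize them again on the destination (or back on the source, for a revert) just before finish_migration/finish_revert_migration runs. A rough sketch of the two halves, written over the same manager attributes used above (the helper names are illustrative, not part of the change):

    def disconnect_instance_volumes(manager, context, instance):
        # Source side: close the volume connections before the instance moves.
        bdms = manager._get_instance_volume_bdms(context, instance['uuid'])
        if not bdms:
            return
        connector = manager.driver.get_volume_connector(instance)
        for bdm in bdms:
            volume = manager.volume_api.get(context, bdm['volume_id'])
            manager.volume_api.terminate_connection(context, volume, connector)

    def reconnect_instance_volumes(manager, context, instance):
        # Destination side: re-open the connections before finish_migration.
        bdms = manager._get_instance_volume_bdms(context, instance['uuid'])
        if not bdms:
            return
        connector = manager.driver.get_volume_connector(instance)
        for bdm in bdms:
            volume = manager.volume_api.get(context, bdm['volume_id'])
            manager.volume_api.initialize_connection(context, volume, connector)
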
5 changes: 3 additions & 2 deletions nova/tests/test_xenapi.py
@@ -358,7 +358,8 @@ def fake_get_rrd(host, vm_uuid):
self.assertDictMatch(fake_diagnostics, expected)

def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
@@ -367,7 +368,7 @@ def create_bad_vbd(vm_ref, vdi_ref):
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref

self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
27 changes: 25 additions & 2 deletions nova/virt/xenapi/driver.py
@@ -188,13 +188,27 @@ def finish_revert_migration(self, instance, network_info,
"""Finish reverting a resize, powering back on the instance"""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'], mount_device)

def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'], mount_device)

def snapshot(self, context, instance, image_id):
""" Create snapshot from a running VM instance """
@@ -237,8 +251,17 @@ def migrate_disk_and_power_off(self, context, instance, dest,
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
# NOTE(vish): Xen currently does not use network info.
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
rv = self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
name_label = self._vmops._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info,
name_label, mount_device)
return rv

def suspend(self, instance):
"""suspend the specified instance"""
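
The attach/detach loops above work off the block_device_info dict handed down by the compute manager; block_device_info_get_mapping simply returns its 'block_device_mapping' list. An illustrative value (shape only, the connection details vary by volume driver) and the reason for the rpartition call:

    # Illustrative block_device_info as built by
    # _get_instance_volume_block_device_info in the compute manager.
    block_device_info = {
        'block_device_mapping': [{
            'connection_info': {
                'driver_volume_type': 'iscsi',
                'data': {'target_iqn': 'iqn.2010-10.org.example:vol-0001',
                         'target_portal': '192.0.2.10:3260'},
            },
            'mount_device': '/dev/xvdb',
        }],
    }

    mount_device = block_device_info['block_device_mapping'][0]['mount_device']
    # rpartition strips the '/dev/' prefix; volumeops expects the bare device name.
    assert mount_device.rpartition("/")[2] == 'xvdb'
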
50 changes: 31 additions & 19 deletions nova/virt/xenapi/vm_utils.py
@@ -524,39 +524,51 @@ def snapshot_attached_here(session, instance, vm_ref, label):
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)

template_vm_ref, template_vdi_uuid = _create_snapshot(
session, instance, vm_ref, label)

try:
vdi_snapshot_recs = _vdi_snapshot_vm_base(session, instance, vm_ref)
sr_ref = vm_vdi_rec["SR"]
parent_uuid, base_uuid = _wait_for_vhd_coalesce(
session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)

vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
_walk_vdi_chain(session, template_vdi_uuid)]
vdi_uuids = []
for snapshot in vdi_snapshot_recs:
vdi_uuids += [vdi_rec['uuid'] for vdi_rec in
_walk_vdi_chain(session, snapshot['uuid'])]

yield vdi_uuids
finally:
_destroy_snapshot(session, instance, template_vm_ref)


def _create_snapshot(session, instance, vm_ref, label):
template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
_destroy_snapshots(session, instance, vdi_snapshot_recs)

LOG.debug(_("Created snapshot %(template_vdi_uuid)s with label"
" '%(label)s'"), locals(), instance=instance)

return template_vm_ref, template_vdi_uuid
def _vdi_snapshot_vm_base(session, instance, vm_ref):
"""Make a snapshot of every non-cinder VDI and return a list
of the new vdi records.
"""
new_vdis = []
try:
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
oc = session.call_xenapi("VBD.get_other_config", vbd_ref)
if 'osvol' not in oc:
# This volume is not a nova/cinder volume
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
snapshot_ref = session.call_xenapi("VDI.snapshot", vdi_ref,
{})
new_vdis.append(session.call_xenapi("VDI.get_record",
snapshot_ref))

except session.XenAPI.Failure:
LOG.exception(_("Failed to snapshot VDI"), instance=instance)
raise
finally:
return new_vdis


def _destroy_snapshot(session, instance, vm_ref):
vdi_refs = lookup_vm_vdis(session, vm_ref)
def _destroy_snapshots(session, instance, vdi_snapshot_recs):
vdi_refs = [session.call_xenapi("VDI.get_by_uuid", vdi_rec['uuid'])
for vdi_rec in vdi_snapshot_recs]
safe_destroy_vdis(session, vdi_refs)

destroy_vm(session, instance, vm_ref)


def get_sr_path(session):
"""Return the path to our storage repository
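
snapshot_attached_here remains a context manager: it now yields the uuids of every snapshotted (non-volume) VDI chain and destroys the snapshots on exit. Callers keep the same shape as before; a usage sketch (upload_vhds stands in for the caller's upload step and is hypothetical here):

    from nova.virt.xenapi import vm_utils

    def snapshot_instance(context, session, instance, vm_ref, image_id):
        label = "%s-snapshot" % instance['name']
        with vm_utils.snapshot_attached_here(
                session, instance, vm_ref, label) as vdi_uuids:
            # The snapshots exist only inside the with-block; upload them now.
            upload_vhds(context, session, instance, vdi_uuids, image_id)
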
20 changes: 18 additions & 2 deletions nova/virt/xenapi/vmops.py
@@ -196,10 +196,26 @@ def finish_migration(self, context, migration, instance, disk_info,
if resize_instance:
self._resize_instance(instance, root_vdi)

# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None

name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')

vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
network_info, image_meta)

network_info, image_meta, kernel_file,
ramdisk_file)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
4 changes: 3 additions & 1 deletion nova/virt/xenapi/volumeops.py
@@ -215,8 +215,10 @@ def detach_volume(self, connection_info, instance_name, mountpoint):
raise Exception(_('Unable to locate volume %s') % mountpoint)

try:
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
vm_utils.unplug_vbd(self._session, vbd_ref)
if vm_rec['power_state'] != 'Halted':
vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
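
The new power_state check reflects that VBD.unplug is a hot-unplug operation and only applies to a running VM; during migrate_disk_and_power_off the volumes are detached after the instance has been powered off, so the unplug has to be skipped for a halted VM. A minimal sketch of the guard against a raw XenAPI session:

    def unplug_vbd_if_running(session, vm_ref, vbd_ref):
        # VBD.unplug only succeeds while the VM is running; for a halted VM
        # (e.g. after migrate_disk_and_power_off) skip the unplug entirely.
        if session.call_xenapi("VM.get_power_state", vm_ref) != 'Halted':
            session.call_xenapi("VBD.unplug", vbd_ref)
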
