Refresh volume connections when starting instances
This patch adds network and block device information to the signature
of the power_on method on the virtualization driver. The relevant call
sites are also modified to provide the required information. The libvirt
implementation of power_on has been altered to re-establish network and
volume-related connections.

Fixes bug: 1188326

Change-Id: If617b570e082e3aa321414a2680a3aa0754f6153
beagles committed Jun 25, 2013
1 parent 9331c5c commit db39895
Showing 15 changed files with 100 additions and 39 deletions.
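For readers following the call path, here is a minimal, self-contained sketch of the new contract: power_on now takes the request context, the instance's network info, and its block device info, so a driver can rebuild network and volume connections before starting the guest. The class and parameter names mirror the diff below, but the method bodies and the get_nw_info/get_bdm_info helpers are placeholders for illustration, not Nova's actual implementations.

# Sketch only: simplified stand-ins for Nova's driver base class and
# compute manager. Only the power_on signature mirrors the diff below.


class ComputeDriver(object):

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance.

        network_info and block_device_info let a driver re-establish
        network and volume connections before starting the guest.
        """
        raise NotImplementedError()


class FakeDriver(ComputeDriver):

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        # A real driver (libvirt in this change) would re-plug VIFs and
        # reconnect volumes here before creating the domain.
        print('powered on %s' % instance['name'])


def start_instance(driver, context, instance, get_nw_info, get_bdm_info):
    # Caller-side pattern from nova/compute/manager.py: gather network
    # and block device info first, then hand both to the driver.
    network_info = get_nw_info(context, instance)
    block_device_info = get_bdm_info(context, instance)
    driver.power_on(context, instance, network_info, block_device_info)


if __name__ == '__main__':
    start_instance(FakeDriver(), context={}, instance={'name': 'vm-1'},
                   get_nw_info=lambda ctx, inst: [],
                   get_bdm_info=lambda ctx, inst: {})

In the real code the manager obtains these values via _get_instance_nw_info and _get_instance_volume_block_device_info, and the libvirt driver delegates to _hard_reboot so that backing files, network, and block device connections are re-established before the domain starts.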
16 changes: 14 additions & 2 deletions nova/compute/manager.py
@@ -1540,7 +1540,14 @@ def stop_instance(self, context, instance):
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)

network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
self._legacy_nw_info(network_info),
block_device_info)

current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
@@ -1595,7 +1602,12 @@ def restore_instance(self, context, instance):
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self.driver.power_on(instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
self._legacy_nw_info(network_info),
block_device_info)
current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context, instance['uuid'],
power_state=current_power_state,
3 changes: 2 additions & 1 deletion nova/tests/api/ec2/test_cloud.py
@@ -2137,7 +2137,8 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id):

virt_driver = {}

def fake_power_on(self, instance):
def fake_power_on(self, context, instance, network_info,
block_device_info):
virt_driver['powered_on'] = True

self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
3 changes: 2 additions & 1 deletion nova/tests/compute/test_compute.py
@@ -1427,7 +1427,8 @@ def test_power_on(self):

called = {'power_on': False}

def fake_driver_power_on(self, instance):
def fake_driver_power_on(self, context, instance, network_info,
block_device_info):
called['power_on'] = True

self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
21 changes: 16 additions & 5 deletions nova/tests/virt/hyperv/test_hypervapi.py
@@ -495,13 +495,24 @@ def test_power_off_already_powered_off(self):
constants.HYPERV_VM_STATE_DISABLED)

def test_power_on(self):
self._test_vm_state_change(self._conn.power_on,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_ENABLED)
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()

def test_power_on_already_running(self):
self._test_vm_state_change(self._conn.power_on, None,
constants.HYPERV_VM_STATE_ENABLED)
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()

def test_reboot(self):

24 changes: 21 additions & 3 deletions nova/tests/virt/test_virt_drivers.py
@@ -311,13 +311,14 @@ def test_power_off(self):
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(instance_ref)
self.connection.power_on(self.ctxt, instance_ref,
network_info, None)

@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(instance_ref)
self.connection.power_on(self.ctxt, instance_ref, network_info, None)

@catch_notimplementederror
def test_soft_delete(self):
@@ -407,7 +408,24 @@ def test_attach_detach_different_power_states(self):
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
self.connection.power_on(instance_ref)

bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': [{
'instance_uuid': instance_ref['uuid'],
'connection_info': {'driver_volume_type': 'fake'},
'mount_device': '/dev/sda',
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
}]
}
self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
14 changes: 10 additions & 4 deletions nova/tests/virt/vmwareapi/test_vmwareapi.py
@@ -335,14 +335,14 @@ def test_power_on(self):
self.conn.power_off(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.instance)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)

def test_power_on_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.instance)
self.context, self.instance, self.network_info)

def test_power_off(self):
self._create_vm()
@@ -427,7 +427,7 @@ def fake_vmops_update_instance_progress(context, instance, step,
self.assertEquals(4, step)
self.assertEqual(vmops.RESIZE_TOTAL_STEPS, total_steps)

self.stubs.Set(self.conn._vmops, "power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_update_instance_progress",
fake_vmops_update_instance_progress)

@@ -439,6 +439,7 @@ def fake_vmops_update_instance_progress(context, instance, step,
instance=self.instance,
disk_info=None,
network_info=None,
block_device_info=None,
image_meta=None,
power_on=power_on)
# verify the results
@@ -475,15 +476,20 @@ def fake_get_vm_ref_from_name(session, vm_name):
self.assertEquals(self.vm_name, vm_name)
return vmwareapi_fake._get_objects("VirtualMachine")[0]

def fake_get_vm_ref_from_uuid(session, vm_uuid):
return vmwareapi_fake._get_objects("VirtualMachine")[0]

def fake_call_method(*args, **kwargs):
pass

def fake_wait_for_task(*args, **kwargs):
pass

self.stubs.Set(self.conn._vmops, "power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_get_orig_vm_name_label",
fake_get_orig_vm_name_label)
self.stubs.Set(vm_util, "get_vm_ref_from_uuid",
fake_get_vm_ref_from_uuid)
self.stubs.Set(vm_util, "get_vm_ref_from_name",
fake_get_vm_ref_from_name)
self.stubs.Set(self.conn._session, "_call_method", fake_call_method)
6 changes: 4 additions & 2 deletions nova/virt/baremetal/driver.py
@@ -250,7 +250,8 @@ def spawn(self, context, instance, image_meta, injected_files,
)
self.driver.activate_bootloader(context, node, instance,
network_info=network_info)
self.power_on(instance, node)
self.power_on(context, instance, network_info, block_device_info,
node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
except Exception:
@@ -331,7 +332,8 @@ def power_off(self, instance, node=None):
"for instance %r") % instance['uuid'])
pm.stop_console()

def power_on(self, instance, node=None):
def power_on(self, context, instance, network_info, block_device_info=None,
node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
3 changes: 2 additions & 1 deletion nova/virt/driver.py
@@ -425,7 +425,8 @@ def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
raise NotImplementedError()

2 changes: 1 addition & 1 deletion nova/virt/fake.py
@@ -182,7 +182,7 @@ def post_live_migration_at_destination(self, context, instance,
def power_off(self, instance):
pass

def power_on(self, instance):
def power_on(self, context, instance, network_info, block_device_info):
pass

def soft_delete(self, instance):
3 changes: 2 additions & 1 deletion nova/virt/hyperv/driver.py
@@ -103,7 +103,8 @@ def resume(self, instance, network_info, block_device_info=None):
def power_off(self, instance):
self._vmops.power_off(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance)

def live_migration(self, context, instance_ref, dest, post_method,
12 changes: 6 additions & 6 deletions nova/virt/libvirt/driver.py
@@ -1465,13 +1465,13 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom, instance=instance)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)

def suspend(self, instance):
"""Suspend the specified instance."""
3 changes: 2 additions & 1 deletion nova/virt/powervm/driver.py
@@ -191,7 +191,8 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._powervm.power_off(instance['name'])

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._powervm.power_on(instance['name'])

10 changes: 6 additions & 4 deletions nova/virt/vmwareapi/driver.py
@@ -218,9 +218,10 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
self._vmops._power_on(instance)

def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
@@ -379,15 +380,16 @@ def confirm_migration(self, migration, instance, network_info):
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(instance, power_on)
self._vmops.finish_revert_migration(instance, network_info,
block_device_info, power_on)

def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
power_on)
block_device_info, power_on)

def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
16 changes: 10 additions & 6 deletions nova/virt/vmwareapi/vmops.py
@@ -807,7 +807,7 @@ def unrescue(self, instance):
instance['name'] = instance['name'] + self._rescue_suffix
self.destroy(instance, None)
instance['name'] = instance_orig_name
self.power_on(instance)
self._power_on(instance)

def power_off(self, instance):
"""Power off the specified instance."""
@@ -832,7 +832,7 @@ def power_off(self, instance):
LOG.debug(_("VM was already in powered off state. So returning "
"without doing anything"), instance=instance)

def power_on(self, instance):
def _power_on(self, instance):
"""Power on the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)

@@ -851,6 +851,9 @@ def power_on(self, instance):
self._session._wait_for_task(instance['uuid'], poweron_task)
LOG.debug(_("Powered on the VM"), instance=instance)

def power_on(self, context, instance, network_info, block_device_info):
self._power_on(instance)

def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'

@@ -954,7 +957,8 @@ def confirm_migration(self, migration, instance, network_info):
if network_info:
self.unplug_vifs(instance, network_info)

def finish_revert_migration(self, instance, power_on=True):
def finish_revert_migration(self, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
# The original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
@@ -972,15 +976,15 @@ def finish_revert_migration(self, instance, power_on=True):
LOG.debug(_("Renamed the VM from %s") % name_label,
instance=instance)
if power_on:
self.power_on(instance)
self._power_on(instance)

def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
power_on=True):
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
# 4. Start VM
if power_on:
self.power_on(instance)
self._power_on(instance)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
3 changes: 2 additions & 1 deletion nova/virt/xenapi/driver.py
@@ -266,7 +266,8 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
