Refresh volume connections when starting instances
This patch adds network and block device information to the signature
of the power_on method on the virtualization driver. The relevant call
sites are also modified to provide the required information. The libvirt
implementation of power_on has been altered to re-establish network and
volume-related connections.
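
To make the interface change concrete, here is a condensed view of the old
and new driver contract and of how a call site now invokes it. This is a
simplified sketch drawn from the hunks below (nova/virt/driver.py and
nova/compute/manager.py), not additional code in the commit:

    # Old virt driver interface: only the instance was passed in.
    def power_on(self, instance):
        """Power on the specified instance."""
        raise NotImplementedError()

    # New virt driver interface: the request context, network info and
    # (optional) block device info are supplied so the driver can
    # re-establish network and volume connections.
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        raise NotImplementedError()

    # Call sites such as ComputeManager.start_instance now gather that
    # information before delegating to the driver:
    network_info = self._get_instance_nw_info(context, instance)
    block_device_info = self._get_instance_volume_block_device_info(
        context, instance)
    self.driver.power_on(context, instance,
                         self._legacy_nw_info(network_info),
                         block_device_info)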

Fixes bug: 1188326

(cherry picked from commit db39895)

Conflicts:
	nova/tests/test_vmwareapi.py
	nova/virt/baremetal/driver.py
	nova/virt/libvirt/driver.py
	nova/virt/vmwareapi/driver.py
	nova/virt/vmwareapi/vmops.py

(Includes some collateral additions for console tests in the vmware
 tests.)

Change-Id: If617b570e082e3aa321414a2680a3aa0754f6153
beagles committed Jun 26, 2013
1 parent 1066418 commit ce789f7
Showing 15 changed files with 171 additions and 39 deletions.
16 changes: 14 additions & 2 deletions nova/compute/manager.py
@@ -1428,7 +1428,14 @@ def stop_instance(self, context, instance):
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)

network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
self._legacy_nw_info(network_info),
block_device_info)

current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context, instance['uuid'],
power_state=current_power_state,
@@ -1484,7 +1491,12 @@ def restore_instance(self, context, instance):
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self.driver.power_on(instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
self._legacy_nw_info(network_info),
block_device_info)
current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context, instance['uuid'],
power_state=current_power_state,
3 changes: 2 additions & 1 deletion nova/tests/api/ec2/test_cloud.py
@@ -1945,7 +1945,8 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id):

virt_driver = {}

def fake_power_on(self, instance):
def fake_power_on(self, context, instance, network_info,
block_device_info):
virt_driver['powered_on'] = True

self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
3 changes: 2 additions & 1 deletion nova/tests/compute/test_compute.py
@@ -1063,7 +1063,8 @@ def test_power_on(self):

called = {'power_on': False}

def fake_driver_power_on(self, instance):
def fake_driver_power_on(self, context, instance, network_info,
block_device_info):
called['power_on'] = True

self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
21 changes: 16 additions & 5 deletions nova/tests/test_hypervapi.py
@@ -490,13 +490,24 @@ def test_power_off_already_powered_off(self):
constants.HYPERV_VM_STATE_DISABLED)

def test_power_on(self):
self._test_vm_state_change(self._conn.power_on,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_ENABLED)
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()

def test_power_on_already_running(self):
self._test_vm_state_change(self._conn.power_on, None,
constants.HYPERV_VM_STATE_ENABLED)
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()

def test_reboot(self):

24 changes: 21 additions & 3 deletions nova/tests/test_virt_drivers.py
@@ -305,13 +305,14 @@ def test_power_off(self):
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(instance_ref)
self.connection.power_on(self.ctxt, instance_ref,
network_info, None)

@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(instance_ref)
self.connection.power_on(self.ctxt, instance_ref, network_info, None)

@catch_notimplementederror
def test_soft_delete(self):
@@ -401,7 +402,24 @@ def test_attach_detach_different_power_states(self):
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
self.connection.power_on(instance_ref)

bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': [{
'instance_uuid': instance_ref['uuid'],
'connection_info': {'driver_volume_type': 'fake'},
'mount_device': '/dev/sda',
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
}]
}
self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
79 changes: 76 additions & 3 deletions nova/tests/test_vmwareapi.py
@@ -19,6 +19,9 @@
"""
Test suite for VMwareAPI.
"""
import urllib2

import mox

from nova.compute import power_state
from nova.compute import task_states
@@ -33,6 +36,22 @@
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volume_util


class fake_vm_ref(object):
def __init__(self):
self.value = 4
self._type = 'VirtualMachine'


class fake_http_resp(object):
def __init__(self):
self.code = 200

def read(self):
return "console log"


class VMwareAPIVMTestCase(test.TestCase):
@@ -256,14 +275,14 @@ def test_power_on(self):
self.conn.power_off(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.instance)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)

def test_power_on_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.instance)
self.context, self.instance, self.network_info)

def test_power_off(self):
self._create_vm()
@@ -316,7 +335,61 @@ def test_diagnostics(self):
pass

def test_get_console_output(self):
pass
vm_ref = fake_vm_ref()
result = fake_http_resp()
self._create_instance_in_the_db()
self.mox.StubOutWithMock(vm_util, 'get_vm_ref_from_name')
self.mox.StubOutWithMock(urllib2, 'urlopen')
vm_util.get_vm_ref_from_name(mox.IgnoreArg(), self.instance['name']).\
AndReturn(vm_ref)
urllib2.urlopen(mox.IgnoreArg()).AndReturn(result)

self.mox.ReplayAll()
self.conn.get_console_output(self.instance)

def test_diagnostics_non_existent_vm(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_diagnostics,
self.instance)

def test_get_console_pool_info(self):
info = self.conn.get_console_pool_info("console_type")
self.assertEquals(info['address'], 'test_url')
self.assertEquals(info['username'], 'test_username')
self.assertEquals(info['password'], 'test_pass')

def test_get_vnc_console_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_vnc_console,
self.instance)

def test_get_vnc_console(self):
vm_ref = fake_vm_ref()
self._create_instance_in_the_db()
self._create_vm()
self.mox.StubOutWithMock(self.conn._vmops, '_get_vnc_port')
self.conn._vmops._get_vnc_port(mox.IgnoreArg()).AndReturn(5910)
self.mox.ReplayAll()
vnc_dict = self.conn.get_vnc_console(self.instance)
self.assertEquals(vnc_dict['host'], "test_url")
self.assertEquals(vnc_dict['port'], 5910)

def test_host_ip_addr(self):
self.assertEquals(self.conn.get_host_ip_addr(), "test_url")

def test_get_volume_connector(self):

def fake_iqn(session, instance):
return "iscsi-name"

self._create_instance_in_the_db()
self.stubs.Set(volume_util, "get_host_iqn", fake_iqn)
connector_dict = self.conn.get_volume_connector(self.instance)
self.assertEquals(connector_dict['ip'], "test_url")
self.assertEquals(connector_dict['initiator'], "iscsi-name")
self.assertEquals(connector_dict['host'], "test_url")


class VMwareAPIHostTestCase(test.TestCase):
6 changes: 4 additions & 2 deletions nova/virt/baremetal/driver.py
@@ -249,7 +249,8 @@ def spawn(self, context, instance, image_meta, injected_files,
network_info=network_info,
)
self.driver.activate_bootloader(context, node, instance)
self.power_on(instance, node)
self.power_on(context, instance, network_info, block_device_info,
node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
except Exception:
@@ -330,7 +331,8 @@ def power_off(self, instance, node=None):
"for instance %r") % instance['uuid'])
pm.stop_console()

def power_on(self, instance, node=None):
def power_on(self, context, instance, network_info, block_device_info=None,
node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
3 changes: 2 additions & 1 deletion nova/virt/driver.py
@@ -404,7 +404,8 @@ def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
raise NotImplementedError()

2 changes: 1 addition & 1 deletion nova/virt/fake.py
@@ -182,7 +182,7 @@ def post_live_migration_at_destination(self, context, instance,
def power_off(self, instance):
pass

def power_on(self, instance):
def power_on(self, context, instance, network_info, block_device_info):
pass

def soft_delete(self, instance):
3 changes: 2 additions & 1 deletion nova/virt/hyperv/driver.py
@@ -103,7 +103,8 @@ def resume(self, instance, network_info, block_device_info=None):
def power_off(self, instance):
self._vmops.power_off(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance)

def live_migration(self, context, instance_ref, dest, post_method,
12 changes: 6 additions & 6 deletions nova/virt/libvirt/driver.py
@@ -1401,13 +1401,13 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom, instance=instance)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)

def suspend(self, instance):
"""Suspend the specified instance."""
3 changes: 2 additions & 1 deletion nova/virt/powervm/driver.py
@@ -194,7 +194,8 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._powervm.power_off(instance['name'])

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._powervm.power_on(instance['name'])

11 changes: 7 additions & 4 deletions nova/virt/vmwareapi/driver.py
@@ -219,9 +219,10 @@ def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)

def power_on(self, instance):
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
self._vmops._power_on(instance)

def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
@@ -371,14 +372,16 @@ def confirm_migration(self, migration, instance, network_info):
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(instance)
self._vmops.finish_revert_migration(instance, network_info,
block_device_info)

def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance)
network_info, image_meta, resize_instance,
block_device_info)

def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
