Add context as parameter for resume

Currently, when resuming a KVM instance with block storage, nova
compute throws an exception and fails to resume the VM.

The root cause is that when resuming a VM with block storage, the
libvirt driver needs to call the conductor via rpcapi to update block
device information, but resume() does not receive a context, so the
RPC call fails.
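
In short, the driver entry point gains a leading context argument and
the libvirt driver threads it through to _create_domain_and_network().
A minimal sketch of the new call path, using only names from this
change (continuation indentation is illustrative):

    # nova/compute/manager.py: the manager now passes the request context.
    self.driver.resume(context, instance, network_info, block_device_info)

    # nova/virt/libvirt/driver.py: resume() forwards the context so the
    # block device update can reach the conductor over RPC.
    dom = self._create_domain_and_network(xml, instance, network_info,
                    block_device_info=block_device_info, context=context)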

Change-Id: I712777ed1d893a2b6463d30c407b0a677e37b602
Closes-Bug: #1241337
Jay Lau committed Nov 22, 2013
1 parent db86ec9 commit de41588
Showing 13 changed files with 66 additions and 20 deletions.
2 changes: 1 addition & 1 deletion nova/compute/manager.py
@@ -3440,7 +3440,7 @@ def resume_instance(self, context, instance):
         block_device_info = self._get_instance_volume_block_device_info(
                     context, instance)
 
-        self.driver.resume(instance, network_info,
+        self.driver.resume(context, instance, network_info,
                            block_device_info)
 
         instance.power_state = self._get_power_state(context, instance)
6 changes: 4 additions & 2 deletions nova/tests/virt/hyperv/test_hypervapi.py
@@ -543,12 +543,14 @@ def test_suspend_already_suspended(self):
                                    constants.HYPERV_VM_STATE_SUSPENDED)
 
     def test_resume(self):
-        self._test_vm_state_change(lambda i: self._conn.resume(i, None),
+        self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+                                                               i, None),
                                    constants.HYPERV_VM_STATE_SUSPENDED,
                                    constants.HYPERV_VM_STATE_ENABLED)
 
     def test_resume_already_running(self):
-        self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
+        self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+                                                               i, None), None,
                                    constants.HYPERV_VM_STATE_ENABLED)
 
     def test_power_off(self):
36 changes: 36 additions & 0 deletions nova/tests/virt/libvirt/test_libvirt.py
@@ -49,6 +49,7 @@
 from nova.openstack.common import loopingcall
 from nova.openstack.common import processutils
 from nova.openstack.common import uuidutils
+from nova.pci import pci_manager
 from nova import test
 from nova.tests import fake_network
 import nova.tests.image.fake
@@ -4358,6 +4359,41 @@ def fake_get_info(instance_name):
         conn._hard_reboot(self.context, instance, network_info,
                           block_device_info)
 
+    def test_resume(self):
+        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+                    "<devices>"
+                    "<disk type='file'><driver name='qemu' type='raw'/>"
+                    "<source file='/test/disk'/>"
+                    "<target dev='vda' bus='virtio'/></disk>"
+                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
+                    "<source file='/test/disk.local'/>"
+                    "<target dev='vdb' bus='virtio'/></disk>"
+                    "</devices></domain>")
+        instance = db.instance_create(self.context, self.test_instance)
+        network_info = _fake_network_info(self.stubs, 1)
+        block_device_info = None
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with contextlib.nested(
+            mock.patch.object(conn, '_get_existing_domain_xml',
+                              return_value=dummyxml),
+            mock.patch.object(conn, '_create_domain_and_network',
+                              return_value='fake_dom'),
+            mock.patch.object(conn, '_attach_pci_devices'),
+            mock.patch.object(pci_manager, 'get_instance_pci_devs',
+                              return_value='fake_pci_devs'),
+        ) as (_get_existing_domain_xml, _create_domain_and_network,
+              _attach_pci_devices, get_instance_pci_devs):
+            conn.resume(self.context, instance, network_info,
+                        block_device_info)
+            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
+                                    network_info, block_device_info)])
+            _create_domain_and_network.assert_has_calls([mock.call(dummyxml,
+                                    instance, network_info,
+                                    block_device_info=block_device_info,
+                                    context=self.context)])
+            _attach_pci_devices.assert_has_calls([mock.call('fake_dom',
+                                                            'fake_pci_devs')])
+
     def test_destroy_undefines(self):
         mock = self.mox.CreateMock(libvirt.virDomain)
         mock.ID()
3 changes: 2 additions & 1 deletion nova/tests/virt/powervm/test_powervm.py
@@ -841,7 +841,8 @@ def test_suspend(self):
     def test_resume(self):
         # Check to make sure the method raises NotImplementedError.
         self.assertRaises(NotImplementedError, self.powervm_connection.resume,
-                          instance=None, network_info=None)
+                          context.get_admin_context(), instance=None,
+                          network_info=None)
 
     def test_host_power_action(self):
         # Check to make sure the method raises NotImplementedError.
4 changes: 2 additions & 2 deletions nova/tests/virt/test_virt_drivers.py
@@ -374,13 +374,13 @@ def test_suspend(self):
     @catch_notimplementederror
     def test_resume_unsuspended_instance(self):
         instance_ref, network_info = self._get_running_instance()
-        self.connection.resume(instance_ref, network_info)
+        self.connection.resume(self.ctxt, instance_ref, network_info)
 
     @catch_notimplementederror
     def test_resume_suspended_instance(self):
         instance_ref, network_info = self._get_running_instance()
         self.connection.suspend(instance_ref)
-        self.connection.resume(instance_ref, network_info)
+        self.connection.resume(self.ctxt, instance_ref, network_info)
 
     @catch_notimplementederror
     def test_destroy_instance_nonexistent(self):
6 changes: 3 additions & 3 deletions nova/tests/virt/vmwareapi/test_vmwareapi.py
@@ -497,23 +497,23 @@ def test_resume(self):
         info = self.conn.get_info({'uuid': self.uuid,
                                    'node': self.instance_node})
         self._check_vm_info(info, power_state.SUSPENDED)
-        self.conn.resume(self.instance, self.network_info)
+        self.conn.resume(self.context, self.instance, self.network_info)
         info = self.conn.get_info({'uuid': self.uuid,
                                    'node': self.instance_node})
         self._check_vm_info(info, power_state.RUNNING)
 
     def test_resume_non_existent(self):
         self._create_instance_in_the_db()
         self.assertRaises(exception.InstanceNotFound, self.conn.resume,
-                          self.instance, self.network_info)
+                          self.context, self.instance, self.network_info)
 
     def test_resume_not_suspended(self):
         self._create_vm()
         info = self.conn.get_info({'uuid': self.uuid,
                                    'node': self.instance_node})
         self._check_vm_info(info, power_state.RUNNING)
         self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
-                          self.instance, self.network_info)
+                          self.context, self.instance, self.network_info)
 
     def test_power_on(self):
         self._create_vm()
13 changes: 10 additions & 3 deletions nova/virt/driver.py
@@ -419,9 +419,16 @@ def suspend(self, instance):
         # TODO(Vek): Need to pass context in for access to auth_token
         raise NotImplementedError()
 
-    def resume(self, instance, network_info, block_device_info=None):
-        """resume the specified instance."""
-        # TODO(Vek): Need to pass context in for access to auth_token
+    def resume(self, context, instance, network_info, block_device_info=None):
+        """
+        resume the specified instance.
+
+        :param context: the context for the resume
+        :param instance: the instance being resumed
+        :param network_info:
+           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param block_device_info: instance volume block device info
+        """
         raise NotImplementedError()
 
     def resume_state_on_host_boot(self, context, instance, network_info,
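
Since ComputeDriver.resume() now takes the context first, out-of-tree
drivers that subclass ComputeDriver need the matching signature. A
minimal sketch (MyDriver and its _power_on() helper are hypothetical,
not part of this commit):

    from nova.virt import driver


    class MyDriver(driver.ComputeDriver):
        def resume(self, context, instance, network_info,
                   block_device_info=None):
            # 'context' is now available for any conductor/RPC call that
            # needs the request's auth_token.
            self._power_on(instance)  # hypothetical helper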
2 changes: 1 addition & 1 deletion nova/virt/fake.py
@@ -198,7 +198,7 @@ def unpause(self, instance):
     def suspend(self, instance):
         pass
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         pass
 
     def destroy(self, context, instance, network_info, block_device_info=None,
2 changes: 1 addition & 1 deletion nova/virt/hyperv/driver.py
@@ -100,7 +100,7 @@ def unpause(self, instance):
     def suspend(self, instance):
         self._vmops.suspend(instance)
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         self._vmops.resume(instance)
 
     def power_off(self, instance):
4 changes: 2 additions & 2 deletions nova/virt/libvirt/driver.py
@@ -2032,12 +2032,12 @@ def suspend(self, instance):
                                  pci_manager.get_instance_pci_devs(instance))
         dom.managedSave(0)
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         """resume the specified instance."""
         xml = self._get_existing_domain_xml(instance, network_info,
                                             block_device_info)
         dom = self._create_domain_and_network(xml, instance, network_info,
-                                              block_device_info)
+                        block_device_info=block_device_info, context=context)
         self._attach_pci_devices(dom,
                                  pci_manager.get_instance_pci_devs(instance))
 
2 changes: 1 addition & 1 deletion nova/virt/powervm/driver.py
@@ -201,7 +201,7 @@ def suspend(self, instance):
         raise NotImplementedError(_("Suspend is not supported by the"
                                     "PowerVM driver."))
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         """resume the specified instance."""
         raise NotImplementedError(_("Resume is not supported by the"
                                     "PowerVM driver."))
4 changes: 2 additions & 2 deletions nova/virt/vmwareapi/driver.py
@@ -203,7 +203,7 @@ def suspend(self, instance):
         """Suspend the specified instance."""
         self._vmops.suspend(instance)
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         """Resume the suspended VM instance."""
         self._vmops.resume(instance)
 
@@ -676,7 +676,7 @@ def suspend(self, instance):
         _vmops = self._get_vmops_for_compute_node(instance['node'])
         _vmops.suspend(instance)
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         """Resume the suspended VM instance."""
         _vmops = self._get_vmops_for_compute_node(instance['node'])
         _vmops.resume(instance)
2 changes: 1 addition & 1 deletion nova/virt/xenapi/driver.py
@@ -318,7 +318,7 @@ def suspend(self, instance):
         """suspend the specified instance."""
         self._vmops.suspend(instance)
 
-    def resume(self, instance, network_info, block_device_info=None):
+    def resume(self, context, instance, network_info, block_device_info=None):
         """resume the specified instance."""
         self._vmops.resume(instance)
 
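
For callers, the context now comes first and block_device_info stays
optional. A minimal usage sketch against the in-tree FakeDriver (the
placeholder instance dict and empty network_info are illustrative;
FakeDriver.resume() is a no-op):

    from nova import context as nova_context
    from nova.virt import fake

    ctxt = nova_context.get_admin_context()
    conn = fake.FakeDriver(virtapi=None)

    # Placeholder arguments; FakeDriver.resume() ignores them.
    instance = {'name': 'instance-0000000a'}
    conn.resume(ctxt, instance, network_info=[], block_device_info=None)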
