Skip to content

Commit

Permalink
libvirt: Refresh volume connection_info after volume snapshot
Browse files Browse the repository at this point in the history
The following patch is related to the guest assisted snapshot
functionality.  When you take a snapshot of a volume
(e.g. GlusterFS) attached to a running instance, a new snapshot
file is created, i.e. volume-<uuid>.<snapshot-uuid>.  The
instance uses this file as the active volume.  If you shut down
and restart the instance, nova will reattach the base volume
(volume-<uuid>) to the instance instead of the snapshot volume
(volume-<uuid>.<snapshot-uuid>).  The expected behavior is to
have the snapshot volume reattach to the instance.  This is
caused by stale data being returned from the database when
_get_instance_volume_block_device_info is called during
_power_on.  To fix this bug, this patch calls
refresh_connection_info to update the database in both
volume_snapshot_create and volume_snapshot_delete methods of the
libvirt driver.

Change-Id: I0f340a3f879580e7981d97863bc299e33d71aa84
Closes-Bug: #1304695
  • Loading branch information
thang-pham committed May 1, 2014
1 parent 601b55f commit 329b594
Show file tree
Hide file tree
Showing 2 changed files with 56 additions and 0 deletions.
35 changes: 35 additions & 0 deletions nova/tests/virt/libvirt/test_libvirt.py
Expand Up @@ -55,6 +55,7 @@
from nova.openstack.common import uuidutils
from nova.pci import pci_manager
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_network
import nova.tests.image.fake
Expand Down Expand Up @@ -8808,6 +8809,29 @@ def setUp(self):
def tearDown(self):
    # Nothing extra to clean up for these snapshot tests; defer to the
    # base test case's teardown (mock/mox cleanup is handled there).
    super(LibvirtVolumeSnapshotTestCase, self).tearDown()

@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
            'refresh_connection_info')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_id')
def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                        mock_refresh_connection_info):
    """_volume_refresh_connection_info looks up the BDM for the volume
    and delegates to refresh_connection_info with the driver's volume
    API and the driver itself.
    """
    bdm_values = {
        'id': 123,
        'instance_uuid': 'fake-instance',
        'device_name': '/dev/sdb',
        'source_type': 'volume',
        'destination_type': 'volume',
        'volume_id': 'fake-volume-id-1',
        'connection_info': '{"fake": "connection_info"}',
    }
    mock_get_by_volume_id.return_value = (
        fake_block_device.FakeDbBlockDeviceDict(bdm_values))

    self.conn._volume_refresh_connection_info(self.c, self.inst,
                                              self.volume_uuid)

    # The BDM lookup is keyed by volume id, and the refresh is handed
    # the libvirt driver's volume API plus the driver as virt_driver.
    mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
    mock_refresh_connection_info.assert_called_once_with(
        self.c, self.inst, self.conn._volume_api, self.conn)

def test_volume_snapshot_create(self, quiesce=True):
CONF.instance_name_template = 'instance-%s'
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
Expand Down Expand Up @@ -8882,6 +8906,13 @@ def test_volume_snapshot_create_outer_success(self):
self.conn._volume_api.update_snapshot_status(
self.c, self.create_info['snapshot_id'], 'creating')

self.mox.StubOutWithMock(self.conn._volume_api, 'get_snapshot')
self.conn._volume_api.get_snapshot(self.c,
self.create_info['snapshot_id']).AndReturn({'status': 'available'})
self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
self.conn._volume_refresh_connection_info(self.c, instance,
self.volume_uuid)

self.mox.ReplayAll()

self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
Expand Down Expand Up @@ -9001,6 +9032,10 @@ def test_volume_snapshot_delete_outer_success(self):
self.conn._volume_api.update_snapshot_status(
self.c, snapshot_id, 'deleting')

self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
self.conn._volume_refresh_connection_info(self.c, instance,
self.volume_uuid)

self.mox.ReplayAll()

self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
Expand Down
21 changes: 21 additions & 0 deletions nova/virt/libvirt/driver.py
Expand Up @@ -70,6 +70,7 @@
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.objects import block_device as block_device_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import service as service_obj
Expand All @@ -89,6 +90,7 @@
from nova import rpc
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import cpu
from nova.virt.disk import api as disk
Expand Down Expand Up @@ -1784,6 +1786,13 @@ def _volume_snapshot_create(self, context, instance, domain,

raise

def _volume_refresh_connection_info(self, context, instance, volume_id):
    """Re-fetch and persist a volume's connection_info.

    Looks up the block device mapping for *volume_id* and asks the
    driver-level BDM wrapper to refresh its connection_info, so that
    subsequent reads from the database (e.g. during power-on) see the
    currently attached volume file rather than stale data.
    """
    bdm_obj = block_device_obj.BlockDeviceMapping.get_by_volume_id(
        context, volume_id)
    driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
    driver_bdm.refresh_connection_info(
        context, instance, self._volume_api, self)

def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
Expand Down Expand Up @@ -1830,6 +1839,17 @@ def volume_snapshot_create(self, context, instance, volume_id,
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')

def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)

if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()

timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()

def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
Expand Down Expand Up @@ -1964,6 +1984,7 @@ def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
context, snapshot_id, 'error_deleting')

self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)

def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
Expand Down

0 comments on commit 329b594

Please sign in to comment.