Refactor nova.volume.cinder.API to reduce roundtrips with Cinder
Make cinder.API methods accept volume_id instead of the whole volume object.
This removes a redundant roundtrip that fetched the volume before
passing it to other methods, which in fact only need its id.

Fixes bug 1172297

Change-Id: I5e7f944c1c29b2f211ece2ef86c0959c81e806df
Oleg Bondarev authored and markmc committed May 14, 2013
1 parent 28f0b01 commit 586e752
Showing 13 changed files with 182 additions and 180 deletions.
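The pattern repeated across the files below is mechanical: wherever the caller only needs the volume's (or snapshot's) id, the preliminary volume_api.get() roundtrip is dropped and the id is passed straight through. A minimal illustrative sketch of the before/after shape (the delete_volume wrapper here is a simplified stand-in, not code taken verbatim from the diff):

# Before: an extra GET to Cinder just to rebuild a volume dict whose
# only used field is its id.
def delete_volume(self, context, volume_id):
    volume = self.volume_api.get(context, volume_id)   # GET /volumes/<id>
    self.volume_api.delete(context, volume)            # DELETE /volumes/<id>

# After: the id is passed directly, so only one request reaches Cinder.
def delete_volume(self, context, volume_id):
    self.volume_api.delete(context, volume_id)         # DELETE /volumes/<id>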
10 changes: 4 additions & 6 deletions nova/api/ec2/cloud.py
@@ -391,8 +391,8 @@ def create_snapshot(self, context, volume_id, **kwargs):
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
- volume = self.volume_api.get(context, volume_id)
- args = (context, volume, kwargs.get('name'), kwargs.get('description'))
+ args = (context, volume_id, kwargs.get('name'),
+ kwargs.get('description'))
if kwargs.get('force', False):
snapshot = self.volume_api.create_snapshot_force(*args)
else:
@@ -403,8 +403,7 @@ def create_snapshot(self, context, volume_id, **kwargs):

def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
- snapshot = self.volume_api.get_snapshot(context, snapshot_id)
- self.volume_api.delete_snapshot(context, snapshot)
+ self.volume_api.delete_snapshot(context, snapshot_id)
return True

def describe_key_pairs(self, context, key_name=None, **kwargs):
@@ -863,8 +862,7 @@ def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
- volume = self.volume_api.get(context, volume_id)
- self.volume_api.delete(context, volume)
+ self.volume_api.delete(context, volume_id)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))

11 changes: 4 additions & 7 deletions nova/api/openstack/compute/contrib/volumes.py
@@ -187,8 +187,7 @@ def delete(self, req, id):
LOG.audit(_("Delete volume with id: %s"), id, context=context)

try:
- vol = self.volume_api.get(context, id)
- self.volume_api.delete(context, vol)
+ self.volume_api.delete(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -573,8 +572,7 @@ def delete(self, req, id):
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)

try:
- snapshot = self.volume_api.get_snapshot(context, id)
- self.volume_api.delete_snapshot(context, snapshot)
+ self.volume_api.delete_snapshot(context, id)
except exception.NotFound:
return exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -610,7 +608,6 @@ def create(self, req, body):

snapshot = body['snapshot']
volume_id = snapshot['volume_id']
- vol = self.volume_api.get(context, volume_id)

force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
@@ -622,12 +619,12 @@ def create(self, req, body):

if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
- vol,
+ volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
- vol,
+ volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))

13 changes: 6 additions & 7 deletions nova/compute/api.py
@@ -1182,18 +1182,17 @@ def _local_delete(self, context, instance, bdms):
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
- volume = self.volume_api.get(context, bdm['volume_id'])
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
self.volume_api.terminate_connection(context,
- volume,
+ bdm['volume_id'],
connector)
- self.volume_api.detach(elevated, volume)
+ self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
- self.volume_api.delete(context, volume)
+ self.volume_api.delete(context, bdm['volume_id'])
self.db.block_device_mapping_destroy(context, bdm['id'])
instance = self._instance_update(context,
instance_uuid,
@@ -1613,7 +1612,7 @@ def snapshot_volume_backed(self, context, instance, image_meta, name,
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
- context, volume, name, volume['display_description'])
+ context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
del bdm['volume_id']

@@ -2306,7 +2305,7 @@ def attach_volume(self, context, instance, volume_id, device=None):
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
- self.volume_api.reserve_volume(context, volume)
+ self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
@@ -2321,7 +2320,7 @@ def _detach_volume(self, context, instance, volume):
it easier for cells version to override.
"""
self.volume_api.check_detach(context, volume)
- self.volume_api.begin_detaching(context, volume)
+ self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])

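Note that not every get() call disappears: in attach_volume above, the volume is still fetched once because check_attach() needs to inspect fields such as the volume's status, whereas reserve_volume() and begin_detaching() only need the id. A condensed sketch of the resulting split (simplified from the hunk above; the rollback-on-failure handling is omitted):

# Condensed from nova/compute/api.py after this change; error handling omitted.
def attach_volume(self, context, instance, volume_id, device=None):
    volume = self.volume_api.get(context, volume_id)     # still needed by check_attach
    self.volume_api.check_attach(context, volume, instance=instance)  # reads volume fields
    self.volume_api.reserve_volume(context, volume_id)   # only the id is required here
    self.compute_rpcapi.attach_volume(context, instance=instance,
                                      volume_id=volume_id, mountpoint=device)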
51 changes: 21 additions & 30 deletions nova/compute/manager.py
@@ -793,7 +793,7 @@ def _setup_block_device_mapping(self, context, instance, bdms):
if bdm['volume_id'] is not None:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attach(context, volume,
- instance=instance)
+ instance=instance)
cinfo = self._attach_volume_boot(context,
instance,
volume,
@@ -1316,12 +1316,11 @@ def _shutdown_instance(self, context, instance, bdms):
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell nova-volume that we are done with it.
- volume = self.volume_api.get(context, bdm['volume_id'])
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
- volume,
+ bdm['volume_id'],
connector)
- self.volume_api.detach(context, volume)
+ self.volume_api.detach(context, bdm['volume_id'])
except exception.DiskNotFound as exc:
LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
instance=instance)
@@ -1336,8 +1335,7 @@ def _cleanup_volumes(self, context, instance_uuid, bdms):
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
if bdm['volume_id'] and bdm['delete_on_termination']:
- volume = self.volume_api.get(context, bdm['volume_id'])
- self.volume_api.delete(context, volume)
+ self.volume_api.delete(context, bdm['volume_id'])
# NOTE(vish): bdms will be deleted on instance destroy

@hooks.add_hook("delete_instance")
@@ -1652,8 +1650,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
# NOTE(sirp): this detach is necessary b/c we will reattach the
# volumes in _prep_block_devices below.
for bdm in self._get_volume_bdms(bdms):
- volume = self.volume_api.get(context, bdm['volume_id'])
- self.volume_api.detach(context, volume)
+ self.volume_api.detach(context, bdm['volume_id'])

if not recreate:
block_device_info = self._get_volume_block_device_info(
@@ -1726,7 +1723,7 @@ def _handle_bad_volumes_detached(self, context, instance, bad_devices,
LOG.info(_("Detaching from volume api: %s") % volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
- self.volume_api.begin_detaching(context, volume)
+ self.volume_api.begin_detaching(context, volume_id)

# Manager-detach
self.detach_volume(context, volume_id, instance)
@@ -2209,9 +2206,8 @@ def _refresh_block_device_connection_info(self, context, instance):
connector = self.driver.get_volume_connector(instance)

for bdm in bdms:
- volume = self.volume_api.get(context, bdm['volume_id'])
cinfo = self.volume_api.initialize_connection(
- context, volume, connector)
+ context, bdm['volume_id'], connector)

self.conductor_api.block_device_mapping_update(
context, bdm['id'],
@@ -2473,9 +2469,8 @@ def _terminate_volume_connections(self, context, instance):
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
- volume = self.volume_api.get(context, bdm['volume_id'])
- self.volume_api.terminate_connection(context, volume,
- connector)
+ self.volume_api.terminate_connection(context, bdm['volume_id'],
+ connector)

def _finish_resize(self, context, instance, migration, disk_info,
image):
@@ -2880,9 +2875,9 @@ def _attach_volume_boot(self, context, instance, volume, mountpoint):
locals(), context=context, instance=instance)
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
- volume,
+ volume_id,
connector)
- self.volume_api.attach(context, volume, instance_uuid, mountpoint)
+ self.volume_api.attach(context, volume_id, instance_uuid, mountpoint)
return connection_info

@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -2925,22 +2920,21 @@ def attach_volume(self, context, volume_id, mountpoint, instance):
context, instance, mountpoint)

def _attach_volume(self, context, volume_id, mountpoint, instance):
- volume = self.volume_api.get(context, volume_id)
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
locals(), context=context, instance=instance)
try:
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
- volume,
+ volume_id,
connector)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to connect to volume %(volume_id)s "
"while attaching at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
- self.volume_api.unreserve_volume(context, volume)
+ self.volume_api.unreserve_volume(context, volume_id)

if 'serial' not in connection_info:
connection_info['serial'] = volume_id
@@ -2956,11 +2950,11 @@ def _attach_volume(self, context, volume_id, mountpoint, instance):
LOG.exception(msg % locals(), context=context,
instance=instance)
self.volume_api.terminate_connection(context,
- volume,
+ volume_id,
connector)

self.volume_api.attach(context,
- volume,
+ volume_id,
instance['uuid'],
mountpoint)
values = {
@@ -3001,8 +2995,7 @@ def _detach_volume(self, context, instance, bdm):
msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
- volume = self.volume_api.get(context, volume_id)
- self.volume_api.roll_detaching(context, volume)
+ self.volume_api.roll_detaching(context, volume_id)

@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@@ -3031,10 +3024,9 @@ def detach_volume(self, context, volume_id, instance):
update_totals=True)

self._detach_volume(context, instance, bdm)
- volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
- self.volume_api.terminate_connection(context, volume, connector)
- self.volume_api.detach(context.elevated(), volume)
+ self.volume_api.terminate_connection(context, volume_id, connector)
+ self.volume_api.detach(context.elevated(), volume_id)
self.conductor_api.block_device_mapping_destroy_by_instance_and_volume(
context, instance, volume_id)

@@ -3047,9 +3039,8 @@ def remove_volume_connection(self, context, volume_id, instance):
try:
bdm = self._get_instance_volume_bdm(context, instance, volume_id)
self._detach_volume(context, instance, bdm)
- volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
- self.volume_api.terminate_connection(context, volume, connector)
+ self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass

@@ -3271,8 +3262,8 @@ def _post_live_migration(self, ctxt, instance_ref,

# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
- volume = self.volume_api.get(ctxt, bdm['volume_id'])
- self.volume_api.terminate_connection(ctxt, volume, connector)
+ self.volume_api.terminate_connection(ctxt, bdm['volume_id'],
+ connector)

# Releasing vlan.
# (not necessary in current implementation?)
10 changes: 5 additions & 5 deletions nova/tests/api/ec2/test_cinder_cloud.py
@@ -371,12 +371,12 @@ def _block_device_mapping_create(self, instance_uuid, mappings):
**kwargs)
if 'snapshot_id' in values:
self.volume_api.create_snapshot(self.context,
- vol,
+ vol['id'],
'snapshot-bdm',
'fake snap for bdm tests',
values['snapshot_id'])

- self.volume_api.attach(self.context, vol,
+ self.volume_api.attach(self.context, vol['id'],
instance_uuid, bdm['device_name'])
volumes.append(vol)
return volumes
@@ -441,7 +441,7 @@ def _setUpBlockDeviceMapping(self):

def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
for vol in volumes:
- self.volume_api.delete(self.context, vol)
+ self.volume_api.delete(self.context, vol['id'])
for uuid in (inst1['uuid'], inst2['uuid']):
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, uuid):
@@ -746,10 +746,10 @@ def test_stop_start_with_volume(self):
self.assertTrue(str(vol['id']) == str(vol1_uuid) or
str(vol['id']) == str(vol2_uuid))
if str(vol['id']) == str(vol1_uuid):
- self.volume_api.attach(self.context, vol,
+ self.volume_api.attach(self.context, vol['id'],
instance_uuid, '/dev/sdb')
elif str(vol['id']) == str(vol2_uuid):
- self.volume_api.attach(self.context, vol,
+ self.volume_api.attach(self.context, vol['id'],
instance_uuid, '/dev/sdc')

vol = self.volume_api.get(self.context, vol1_uuid)
4 changes: 2 additions & 2 deletions nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -199,7 +199,7 @@ def test_volume_show(self):
self.assertEqual(resp.status_int, 200)

def test_volume_show_no_volume(self):
- self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)

req = webob.Request.blank('/v2/fake/os-volumes/456')
resp = req.get_response(self.app)
@@ -212,7 +212,7 @@ def test_volume_delete(self):
self.assertEqual(resp.status_int, 202)

def test_volume_delete_no_volume(self):
- self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
+ self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)

req = webob.Request.blank('/v2/fake/os-volumes/456')
req.method = 'DELETE'
2 changes: 1 addition & 1 deletion nova/tests/api/openstack/compute/test_server_actions.py
@@ -797,7 +797,7 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id):
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.create_snapshot_force(mox.IgnoreArg(), volume,
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)

self.mox.ReplayAll()
10 changes: 5 additions & 5 deletions nova/tests/api/openstack/fakes.py
@@ -571,7 +571,7 @@ def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)


- def stub_volume_get_notfound(self, context, volume_id):
+ def stub_volume_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)


@@ -601,13 +601,13 @@ def stub_snapshot(id, **kwargs):
return snapshot


- def stub_snapshot_create(self, context, volume, name, description):
- return stub_snapshot(100, volume_id=volume['id'], display_name=name,
+ def stub_snapshot_create(self, context, volume_id, name, description):
+ return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)


- def stub_snapshot_delete(self, context, snapshot):
- if snapshot['id'] == '-1':
+ def stub_snapshot_delete(self, context, snapshot_id):
+ if snapshot_id == '-1':
raise exc.NotFound


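The corresponding signature changes in nova/volume/cinder.py itself are part of this commit but are not among the files shown in this excerpt. A rough, hedged sketch of what the id-based wrapper methods could look like (the injected client and the exact cinderclient call shapes are assumptions for illustration, not taken from the diff above):

# Illustrative only -- the real nova/volume/cinder.py changes are not shown here.
class API(object):
    """Simplified stand-in for Nova's Cinder wrapper after this refactoring."""

    def __init__(self, client):
        # 'client' stands in for an authenticated python-cinderclient connection.
        self.client = client

    def delete(self, context, volume_id):
        # Accepts the id directly instead of a previously fetched volume dict.
        self.client.volumes.delete(volume_id)

    def delete_snapshot(self, context, snapshot_id):
        self.client.volume_snapshots.delete(snapshot_id)

    def create_snapshot(self, context, volume_id, name, description):
        # Assumed cinderclient signature: create(volume_id, force, name, description).
        return self.client.volume_snapshots.create(
            volume_id, False, name, description)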
