Send 'create volume from snapshot' to the proper host
A simple solution for bug 1008866. When creating a volume from a
snapshot in a multi-host deployment, the volume API checks whether
snapshot_id is set. If it is, the create_volume call is cast directly
to the volume host where the snapshot resides instead of being passed
through the scheduler, so the snapshot can be copied to the new volume.
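
To make the mechanics concrete, here is a minimal standalone sketch of
the routing decision (not cinder code; it assumes per-host topics are
composed as 'topic.host', which is what rpc.queue_get_for is expected
to return in this era's rpc layer):

    # Minimal sketch, not cinder code: the routing decision in isolation.
    # Assumes per-host topics look like 'topic.host' (an assumption).
    def pick_topic(snapshot_id, snapshot_same_host, snapshot_host=None,
                   volume_topic='volume', scheduler_topic='scheduler'):
        if snapshot_id and snapshot_same_host:
            # Bypass the scheduler; target the host holding the snapshot.
            return '%s.%s' % (volume_topic, snapshot_host)
        return scheduler_topic

    assert pick_topic('snap-1', True, snapshot_host='host-a') == 'volume.host-a'
    assert pick_topic('snap-1', False) == 'scheduler'
    assert pick_topic(None, True) == 'scheduler'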

Change-Id: Ie9c1a77f62abc40e294b1d0c604cf885652728da
zhurongze committed Aug 3, 2012
1 parent 0be1725 commit 99456bd
Showing 2 changed files with 39 additions and 6 deletions.
40 changes: 34 additions & 6 deletions cinder/volume/api.py
@@ -26,6 +26,7 @@
 
 from cinder import exception
 from cinder import flags
+from cinder.openstack.common import cfg
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import rpc
 import cinder.policy
@@ -34,7 +35,12 @@
 from cinder import utils
 from cinder.db import base
 
+volume_host_opt = cfg.BoolOpt('snapshot_same_host',
+                              default=True,
+                              help='Create volume from snapshot at the host where snapshot resides')
+
 FLAGS = flags.FLAGS
+FLAGS.register_opt(volume_host_opt)
 flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
 
 LOG = logging.getLogger(__name__)
@@ -113,14 +119,36 @@ def create(self, context, size, name, description, snapshot=None,
         }
 
         volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id'],
-                           "snapshot_id": snapshot_id}})
+        self._cast_create_volume(context, volume['id'], snapshot_id)
         return volume
 
+    def _cast_create_volume(self, context, volume_id, snapshot_id):
+
+        # NOTE(Rongze Zhu): A simple solution for bug 1008866. If
+        # snapshot_id is set, make the create volume call directly to
+        # the volume host where the snapshot resides instead of passing
+        # it through the scheduler, so the snapshot can be copied.
+
+        if snapshot_id and FLAGS.snapshot_same_host:
+            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            src_volume_ref = self.db.volume_get(context,
+                                                snapshot_ref['volume_id'])
+            topic = rpc.queue_get_for(context,
+                                      FLAGS.volume_topic,
+                                      src_volume_ref['host'])
+            rpc.cast(context,
+                     topic,
+                     {"method": "create_volume",
+                      "args": {"volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+        else:
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "create_volume",
+                      "args": {"topic": FLAGS.volume_topic,
+                               "volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+
     # TODO(yamahata): eliminate dumb polling
     def wait_creation(self, context, volume):
         volume_id = volume['id']
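
For context on the receiving end: rpc.cast is fire-and-forget, and the
consumer on the target host dispatches the payload's "method" to the
volume manager with "args" as keyword arguments. A rough, self-contained
illustration of that mapping (FakeVolumeManager and the IDs here are
stand-ins, not cinder code):

    # Illustration only: how a cast payload like those above is expected
    # to be dispatched on the consuming host; all names are stand-ins.
    class FakeVolumeManager(object):
        def create_volume(self, context, volume_id, snapshot_id=None):
            print('create %s from snapshot %s' % (volume_id, snapshot_id))

    payload = {"method": "create_volume",
               "args": {"volume_id": "vol-uuid", "snapshot_id": "snap-uuid"}}
    manager = FakeVolumeManager()
    # The rpc consumer resolves the method by name and applies the args:
    getattr(manager, payload["method"])(None, **payload["args"])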
5 changes: 5 additions & 0 deletions etc/cinder/cinder.conf.sample
@@ -564,6 +564,11 @@
 ###### (IntOpt) maximum number of volume gigabytes to allow per host
 # max_gigabytes=10000
 
+######### defined in cinder.volume.api #########
+
+###### (BoolOpt) Create volume from snapshot at the host where snapshot resides
+# snapshot_same_host=true
+
 ######### defined in cinder.volume.driver #########
 
 ###### (StrOpt) iscsi target user-land tool to use
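
Because snapshot_same_host defaults to true, the direct-to-host path is
enabled out of the box. An operator who prefers the old behavior (always
route through the scheduler) would turn the flag off in cinder.conf; a
hypothetical override, not part of this commit:

    snapshot_same_host=false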
