APIs should not wait on scheduler for builds in single zone deployment
Fixes bug 885349

We can short-circuit waiting on the scheduler if we're in a single-zone
deployment and building only one instance.  This patch checks for that
case and creates the instance DB entry directly in the API (in
compute/api), without a blocking call to the scheduler.

Change-Id: I98b27156167f057d11fbc56a9ff99d4e2ec423d3
comstud committed Nov 2, 2011
1 parent 830760b commit 0571385
Showing 7 changed files with 38 additions and 40 deletions.
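
In outline, the patch adds a fast path to the compute API's create(): when
exactly one instance is requested and zone routing is disabled, the API
creates the instance DB row itself and sends the scheduler an asynchronous
cast instead of blocking on a synchronous call. Below is a minimal sketch of
that decision logic; FakeFlags and pick_build_strategy are illustrative
stand-ins (not nova's real layout), and only the branching mirrors the patch.

# Illustrative sketch of the fast-path decision this commit adds.
class FakeFlags(object):
    enable_zone_routing = False     # single-zone deployment


FLAGS = FakeFlags()


def pick_build_strategy(max_count):
    """Return (create_instance_here, must_block_on_scheduler)."""
    # The API may create the DB row itself only when a single
    # instance is requested and no child zone could own the build.
    create_instance_here = (max_count == 1 and
                            not FLAGS.enable_zone_routing)
    # cast (non-blocking) when the row already exists locally;
    # call (blocking) when the scheduler must create it.
    return create_instance_here, not create_instance_here


assert pick_build_strategy(1) == (True, False)
assert pick_build_strategy(2) == (False, True)
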
5 changes: 1 addition & 4 deletions nova/api/ec2/cloud.py
@@ -1437,10 +1437,7 @@ def run_instances(self, context, **kwargs):
             security_group=kwargs.get('security_group'),
             availability_zone=kwargs.get('placement', {}).get(
                 'AvailabilityZone'),
-            block_device_mapping=kwargs.get('block_device_mapping', {}),
-            # NOTE(comstud): Unfortunately, EC2 requires that the
-            # instance DB entries have been created..
-            wait_for_instances=True)
+            block_device_mapping=kwargs.get('block_device_mapping', {}))
         return self._format_run_instances(context, resv_id)
 
     def _do_instance(self, action, context, ec2_id):
3 changes: 1 addition & 2 deletions nova/api/openstack/servers.py
@@ -462,8 +462,7 @@ def create(self, req, body):
                 user_data=user_data,
                 availability_zone=availability_zone,
                 config_drive=config_drive,
-                block_device_mapping=block_device_mapping,
-                wait_for_instances=not ret_resv_id)
+                block_device_mapping=block_device_mapping)
         except quota.QuotaError as error:
             self._handle_quota_error(error)
         except exception.InstanceTypeMemoryTooSmall as error:
35 changes: 25 additions & 10 deletions nova/compute/api.py
@@ -45,6 +45,7 @@


 FLAGS = flags.FLAGS
+flags.DECLARE('enable_zone_routing', 'nova.scheduler.api')
 flags.DECLARE('vncproxy_topic', 'nova.vnc')
 flags.DEFINE_integer('find_host_timeout', 30,
                      'Timeout after NN seconds when looking for a host.')
@@ -189,8 +190,7 @@ def _create_instance(self, context, instance_type,
                          injected_files, admin_password, zone_blob,
                          reservation_id, access_ip_v4, access_ip_v6,
                          requested_networks, config_drive,
-                         block_device_mapping,
-                         wait_for_instances):
+                         block_device_mapping, create_instance_here=False):
         """Verify all the input parameters regardless of the provisioning
         strategy being performed and schedule the instance(s) for
         creation."""
@@ -325,10 +325,18 @@ def _create_instance(self, context, instance_type,

LOG.debug(_("Going to run %s instances...") % num_instances)

if wait_for_instances:
rpc_method = rpc.call
else:
if create_instance_here:
instance = self.create_db_entry_for_new_instance(
context, instance_type, image, base_options,
security_group, block_device_mapping)
# Tells scheduler we created the instance already.
base_options['id'] = instance['id']
rpc_method = rpc.cast
else:
# We need to wait for the scheduler to create the instance
# DB entries, because the instance *could* be # created in
# a child zone.
rpc_method = rpc.call

# TODO(comstud): We should use rpc.multicall when we can
# retrieve the full instance dictionary from the scheduler.
@@ -344,6 +352,8 @@
                 num_instances, requested_networks,
                 block_device_mapping, security_group)
 
+        if create_instance_here:
+            return ([instance], reservation_id)
         return (instances, reservation_id)
 
     @staticmethod
@@ -534,8 +544,7 @@ def create(self, context, instance_type,
                injected_files=None, admin_password=None, zone_blob=None,
                reservation_id=None, block_device_mapping=None,
                access_ip_v4=None, access_ip_v6=None,
-               requested_networks=None, config_drive=None,
-               wait_for_instances=True):
+               requested_networks=None, config_drive=None):
         """
         Provision instances, sending instance information to the
         scheduler. The scheduler will determine where the instance(s)
@@ -546,6 +555,13 @@ def create(self, context, instance_type,
         we waited for information from the scheduler or not.
         """
 
+        # We can create the DB entry for the instance here if we're
+        # only going to create 1 instance and we're in a single
+        # zone deployment.  This speeds up API responses for builds
+        # as we don't need to wait for the scheduler.
+        create_instance_here = (max_count == 1 and
+                                not FLAGS.enable_zone_routing)
+
         (instances, reservation_id) = self._create_instance(
                 context, instance_type,
                 image_href, kernel_id, ramdisk_id,
@@ -557,10 +573,9 @@ def create(self, context, instance_type,
                 reservation_id, access_ip_v4, access_ip_v6,
                 requested_networks, config_drive,
                 block_device_mapping,
-                wait_for_instances)
+                create_instance_here=create_instance_here)
 
-        if instances is None:
-            # wait_for_instances must have been False
+        if create_instance_here or instances is None:
             return (instances, reservation_id)
 
         inst_ret_list = []
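
Taken together, the compute/api.py changes amount to the handshake sketched
below. This is a simplified, hypothetical rendering: dispatch_build, the
'scheduler' topic string, and the message shape are invented for
illustration, and create_db_entry_for_new_instance takes more arguments in
the real code; only the create-then-cast versus blocking-call split mirrors
the patch.

# Hypothetical sketch of the dispatch logic in _create_instance().
def dispatch_build(api, context, base_options, create_instance_here, rpc):
    msg = {'method': 'run_instance',
           'args': {'request_spec': {'instance_properties': base_options}}}
    if create_instance_here:
        # Create the row now so the API can return it immediately.
        instance = api.create_db_entry_for_new_instance(context, base_options)
        base_options['id'] = instance['id']  # scheduler will skip creation
        rpc.cast(context, 'scheduler', msg)  # fire and forget
        return [instance]
    # The instance could be created in a child zone, so block until
    # the scheduler reports what it created.
    return rpc.call(context, 'scheduler', msg)
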
3 changes: 3 additions & 0 deletions nova/scheduler/driver.py
@@ -155,6 +155,9 @@ def hosts_up(self, context, topic):
     def create_instance_db_entry(self, context, request_spec):
         """Create instance DB entry based on request_spec"""
         base_options = request_spec['instance_properties']
+        if base_options.get('id'):
+            # Instance was already created before calling scheduler
+            return db.instance_get(context, base_options['id'])
         image = request_spec['image']
         instance_type = request_spec.get('instance_type')
         security_group = request_spec.get('security_group', 'default')
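
The guard makes create_instance_db_entry() effectively idempotent with
respect to the API-side fast path. A runnable sketch of the two paths through
it, using a stub in place of nova.db (StubDB and its return values are
invented; only the early-return logic mirrors the patch):

class StubDB(object):
    def instance_get(self, context, instance_id):
        return {'id': instance_id}          # fetch the existing row

    def instance_create(self, context, values):
        return {'id': 99}                   # pretend a new row was made


db = StubDB()


def create_instance_db_entry(context, request_spec):
    base_options = request_spec['instance_properties']
    if base_options.get('id'):
        # Row was already created in the API before the scheduler ran.
        return db.instance_get(context, base_options['id'])
    # Normal path: the scheduler (or a child zone) creates the row.
    return db.instance_create(context, base_options)


assert create_instance_db_entry(None,
        {'instance_properties': {'id': 42}})['id'] == 42
assert create_instance_db_entry(None,
        {'instance_properties': {}})['id'] == 99
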
5 changes: 3 additions & 2 deletions nova/tests/api/ec2/test_cloud.py
@@ -1013,8 +1013,6 @@ def test_describe_image_mapping(self):
         self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                           self._expected_bdms2, 'deviceName')
 
-        self.stubs.UnsetAll()
-
     def test_describe_image_attribute(self):
         describe_image_attribute = self.cloud.describe_image_attribute
 
@@ -1216,6 +1214,9 @@ def fake_show(self, context, id):

         self.stubs.UnsetAll()
         self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+        # NOTE(comstud): Make 'cast' behave like a 'call' which will
+        # ensure that operations complete
+        self.stubs.Set(rpc, 'cast', rpc.call)
 
         result = run_instances(self.context, **kwargs)
         instance = result['instancesSet'][0]
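
The cast-to-call stub matters because run_instances now takes the fast path
in these tests: the build message is cast asynchronously, so making cast
behave synchronously keeps the assertions deterministic. A generic
illustration of the technique (FakeRpc is a stand-in, not nova's rpc module):

class FakeRpc(object):
    def call(self, context, topic, msg):
        return 'handled %s synchronously' % msg['method']

    def cast(self, context, topic, msg):
        pass                    # normally fire-and-forget


rpc = FakeRpc()
rpc.cast = rpc.call             # the same swap the test does via stubs.Set()
assert rpc.cast(None, 'scheduler',
                {'method': 'run_instance'}) == \
        'handled run_instance synchronously'
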
8 changes: 5 additions & 3 deletions nova/tests/api/openstack/test_servers.py
@@ -1354,7 +1354,7 @@ def instance_create(context, inst):
             self.instance_cache_num += 1
             instance = {
                 'id': self.instance_cache_num,
-                'display_name': 'server_test',
+                'display_name': inst['display_name'] or 'test',
                 'uuid': FAKE_UUID,
                 'instance_type': dict(inst_type),
                 'access_ip_v4': '1.2.3.4',
@@ -1390,8 +1390,10 @@ def rpc_call_wrapper(context, topic, msg):
                         request_spec['instance_properties']))
             return instances
 
-        def server_update(context, id, params):
-            return instance_create(context, id)
+        def server_update(context, instance_id, params):
+            inst = self.instance_cache[instance_id]
+            inst.update(params)
+            return inst
 
         def fake_method(*args, **kwargs):
             pass
19 changes: 0 additions & 19 deletions nova/tests/test_compute.py
@@ -1409,25 +1409,6 @@ def test_reservation_ids_two_instances(self):
             self.assertEqual(instance['reservation_id'], resv_id)
             db.instance_destroy(self.context, instance['id'])
 
-    def test_reservation_ids_two_instances_no_wait(self):
-        """Verify building 2 instances at once without waiting for
-        instance IDs results in a reservation_id being returned equal
-        to reservation id set in both instances
-        """
-        (refs, resv_id) = self.compute_api.create(self.context,
-                instance_types.get_default_instance_type(), None,
-                min_count=2, max_count=2, wait_for_instances=False)
-        try:
-            self.assertEqual(refs, None)
-            self.assertNotEqual(resv_id, None)
-        finally:
-            instances = self.compute_api.get_all(self.context,
-                    search_opts={'reservation_id': resv_id})
-            self.assertEqual(len(instances), 2)
-            for instance in instances:
-                self.assertEqual(instance['reservation_id'], resv_id)
-                db.instance_destroy(self.context, instance['id'])
-
     def test_create_with_specified_reservation_id(self):
         """Verify building instances with a specified
         reservation_id results in the correct reservation_id
