Skip to content

Commit 1188dd9

Browse files
author
Eric Day
committed
Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepancies between the two.
1 parent 07ee963 commit 1188dd9

File tree

8 files changed

+211
-238
lines changed

8 files changed

+211
-238
lines changed

nova/api/ec2/cloud.py

Lines changed: 28 additions & 146 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from nova import quota
4040
from nova import rpc
4141
from nova import utils
42-
from nova.compute.instance_types import INSTANCE_TYPES
42+
from nova.compute import instance_types
4343
from nova.api import cloud
4444
from nova.image.s3 import S3ImageService
4545

@@ -50,11 +50,6 @@
5050
InvalidInputException = exception.InvalidInputException
5151

5252

53-
class QuotaError(exception.ApiError):
54-
"""Quota Exceeeded"""
55-
pass
56-
57-
5853
def _gen_key(context, user_id, key_name):
5954
"""Generate a key
6055
@@ -127,7 +122,7 @@ def _get_mpi_data(self, context, project_id):
127122
for instance in db.instance_get_all_by_project(context, project_id):
128123
if instance['fixed_ip']:
129124
line = '%s slots=%d' % (instance['fixed_ip']['address'],
130-
INSTANCE_TYPES[instance['instance_type']]['vcpus'])
125+
instance['vcpus'])
131126
key = str(instance['key_name'])
132127
if key in result:
133128
result[key].append(line)
@@ -260,7 +255,7 @@ def delete_key_pair(self, context, key_name, **kwargs):
260255
return True
261256

262257
def describe_security_groups(self, context, group_name=None, **kwargs):
263-
self._ensure_default_security_group(context)
258+
self.compute_manager.ensure_default_security_group(context)
264259
if context.user.is_admin():
265260
groups = db.security_group_get_all(context)
266261
else:
@@ -358,7 +353,7 @@ def _security_group_rule_exists(self, security_group, values):
358353
return False
359354

360355
def revoke_security_group_ingress(self, context, group_name, **kwargs):
361-
self._ensure_default_security_group(context)
356+
self.compute_manager.ensure_default_security_group(context)
362357
security_group = db.security_group_get_by_name(context,
363358
context.project_id,
364359
group_name)
@@ -383,7 +378,7 @@ def revoke_security_group_ingress(self, context, group_name, **kwargs):
383378
# for these operations, so support for newer API versions
384379
# is sketchy.
385380
def authorize_security_group_ingress(self, context, group_name, **kwargs):
386-
self._ensure_default_security_group(context)
381+
self.compute_manager.ensure_default_security_group(context)
387382
security_group = db.security_group_get_by_name(context,
388383
context.project_id,
389384
group_name)
@@ -419,7 +414,7 @@ def _get_source_project_id(self, context, source_security_group_owner_id):
419414
return source_project_id
420415

421416
def create_security_group(self, context, group_name, group_description):
422-
self._ensure_default_security_group(context)
417+
self.compute_manager.ensure_default_security_group(context)
423418
if db.security_group_exists(context, context.project_id, group_name):
424419
raise exception.ApiError('group %s already exists' % group_name)
425420

@@ -505,9 +500,8 @@ def create_volume(self, context, size, **kwargs):
505500
if quota.allowed_volumes(context, 1, size) < 1:
506501
logging.warn("Quota exceeeded for %s, tried to create %sG volume",
507502
context.project_id, size)
508-
raise QuotaError("Volume quota exceeded. You cannot "
509-
"create a volume of size %s" %
510-
size)
503+
raise quota.QuotaError("Volume quota exceeded. You cannot "
504+
"create a volume of size %s" % size)
511505
vol = {}
512506
vol['size'] = size
513507
vol['user_id'] = context.user.id
@@ -699,8 +693,8 @@ def allocate_address(self, context, **kwargs):
699693
if quota.allowed_floating_ips(context, 1) < 1:
700694
logging.warn("Quota exceeeded for %s, tried to allocate address",
701695
context.project_id)
702-
raise QuotaError("Address quota exceeded. You cannot "
703-
"allocate any more addresses")
696+
raise quota.QuotaError("Address quota exceeded. You cannot "
697+
"allocate any more addresses")
704698
network_topic = self._get_network_topic(context)
705699
public_ip = rpc.call(context,
706700
network_topic,
@@ -752,137 +746,25 @@ def _get_network_topic(self, context):
752746
"args": {"network_id": network_ref['id']}})
753747
return db.queue_get_for(context, FLAGS.network_topic, host)
754748

755-
def _ensure_default_security_group(self, context):
756-
try:
757-
db.security_group_get_by_name(context,
758-
context.project_id,
759-
'default')
760-
except exception.NotFound:
761-
values = {'name': 'default',
762-
'description': 'default',
763-
'user_id': context.user.id,
764-
'project_id': context.project_id}
765-
group = db.security_group_create(context, values)
766-
767749
def run_instances(self, context, **kwargs):
768-
instance_type = kwargs.get('instance_type', 'm1.small')
769-
if instance_type not in INSTANCE_TYPES:
770-
raise exception.ApiError("Unknown instance type: %s",
771-
instance_type)
772-
# check quota
773-
max_instances = int(kwargs.get('max_count', 1))
774-
min_instances = int(kwargs.get('min_count', max_instances))
775-
num_instances = quota.allowed_instances(context,
776-
max_instances,
777-
instance_type)
778-
if num_instances < min_instances:
779-
logging.warn("Quota exceeeded for %s, tried to run %s instances",
780-
context.project_id, min_instances)
781-
raise QuotaError("Instance quota exceeded. You can only "
782-
"run %s more instances of this type." %
783-
num_instances, "InstanceLimitExceeded")
784-
# make sure user can access the image
785-
# vpn image is private so it doesn't show up on lists
786-
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
787-
788-
if not vpn:
789-
image = self.image_service.show(context, kwargs['image_id'])
790-
791-
# FIXME(ja): if image is vpn, this breaks
792-
# get defaults from imagestore
793-
image_id = image['imageId']
794-
kernel_id = image.get('kernelId', FLAGS.default_kernel)
795-
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
796-
797-
# API parameters overrides of defaults
798-
kernel_id = kwargs.get('kernel_id', kernel_id)
799-
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
800-
801-
# make sure we have access to kernel and ramdisk
802-
self.image_service.show(context, kernel_id)
803-
self.image_service.show(context, ramdisk_id)
804-
805-
logging.debug("Going to run %s instances...", num_instances)
806-
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
807-
key_data = None
808-
if 'key_name' in kwargs:
809-
key_pair_ref = db.key_pair_get(context,
810-
context.user.id,
811-
kwargs['key_name'])
812-
key_data = key_pair_ref['public_key']
813-
814-
security_group_arg = kwargs.get('security_group', ["default"])
815-
if not type(security_group_arg) is list:
816-
security_group_arg = [security_group_arg]
817-
818-
security_groups = []
819-
self._ensure_default_security_group(context)
820-
for security_group_name in security_group_arg:
821-
group = db.security_group_get_by_name(context,
822-
context.project_id,
823-
security_group_name)
824-
security_groups.append(group['id'])
825-
826-
reservation_id = utils.generate_uid('r')
827-
base_options = {}
828-
base_options['state_description'] = 'scheduling'
829-
base_options['image_id'] = image_id
830-
base_options['kernel_id'] = kernel_id
831-
base_options['ramdisk_id'] = ramdisk_id
832-
base_options['reservation_id'] = reservation_id
833-
base_options['key_data'] = key_data
834-
base_options['key_name'] = kwargs.get('key_name', None)
835-
base_options['user_id'] = context.user.id
836-
base_options['project_id'] = context.project_id
837-
base_options['user_data'] = kwargs.get('user_data', '')
838-
839-
base_options['display_name'] = kwargs.get('display_name')
840-
base_options['display_description'] = kwargs.get('display_description')
841-
842-
type_data = INSTANCE_TYPES[instance_type]
843-
base_options['instance_type'] = instance_type
844-
base_options['memory_mb'] = type_data['memory_mb']
845-
base_options['vcpus'] = type_data['vcpus']
846-
base_options['local_gb'] = type_data['local_gb']
847-
elevated = context.elevated()
848-
849-
for num in range(num_instances):
850-
851-
instance_ref = self.compute_manager.create_instance(context,
852-
security_groups,
853-
mac_address=utils.generate_mac(),
854-
launch_index=num,
855-
**base_options)
856-
inst_id = instance_ref['id']
857-
858-
internal_id = instance_ref['internal_id']
859-
ec2_id = internal_id_to_ec2_id(internal_id)
860-
861-
self.compute_manager.update_instance(context,
862-
inst_id,
863-
hostname=ec2_id)
864-
865-
# TODO(vish): This probably should be done in the scheduler
866-
# or in compute as a call. The network should be
867-
# allocated after the host is assigned and setup
868-
# can happen at the same time.
869-
address = self.network_manager.allocate_fixed_ip(context,
870-
inst_id,
871-
vpn)
872-
network_topic = self._get_network_topic(context)
873-
rpc.cast(elevated,
874-
network_topic,
875-
{"method": "setup_fixed_ip",
876-
"args": {"address": address}})
877-
878-
rpc.cast(context,
879-
FLAGS.scheduler_topic,
880-
{"method": "run_instance",
881-
"args": {"topic": FLAGS.compute_topic,
882-
"instance_id": inst_id}})
883-
logging.debug("Casting to scheduler for %s/%s's instance %s" %
884-
(context.project.name, context.user.name, inst_id))
885-
return self._format_run_instances(context, reservation_id)
750+
max_count = int(kwargs.get('max_count', 1))
751+
instances = self.compute_manager.create_instances(context,
752+
instance_types.get_by_type(kwargs.get('instance_type', None)),
753+
self.image_service,
754+
kwargs['image_id'],
755+
self._get_network_topic(context),
756+
min_count=int(kwargs.get('min_count', max_count)),
757+
max_count=max_count,
758+
kernel_id=kwargs.get('kernel_id'),
759+
ramdisk_id=kwargs.get('ramdisk_id'),
760+
name=kwargs.get('display_name'),
761+
description=kwargs.get('display_description'),
762+
user_data=kwargs.get('user_data', ''),
763+
key_name=kwargs.get('key_name'),
764+
security_group=kwargs.get('security_group'),
765+
generate_hostname=internal_id_to_ec2_id)
766+
return self._format_run_instances(context,
767+
instances[0]['reservation_id'])
886768

887769
def terminate_instances(self, context, instance_id, **kwargs):
888770
"""Terminate each instance in instance_id, which is a list of ec2 ids.

nova/api/openstack/servers.py

Lines changed: 12 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -140,22 +140,23 @@ def delete(self, req, id):
140140

141141
def create(self, req):
142142
""" Creates a new server for a given user """
143-
144143
env = self._deserialize(req.body, req)
145144
if not env:
146145
return faults.Fault(exc.HTTPUnprocessableEntity())
147146

148-
#try:
149-
inst = self._build_server_instance(req, env)
150-
#except Exception, e:
151-
# return faults.Fault(exc.HTTPUnprocessableEntity())
152-
153147
user_id = req.environ['nova.context']['user']['id']
154-
rpc.cast(context.RequestContext(user_id, user_id),
155-
FLAGS.compute_topic,
156-
{"method": "run_instance",
157-
"args": {"instance_id": inst['id']}})
158-
return _entity_inst(inst)
148+
ctxt = context.RequestContext(user_id, user_id)
149+
key_pair = self.db_driver.key_pair_get_all_by_user(None, user_id)[0]
150+
instances = self.compute_manager.create_instances(ctxt,
151+
instance_types.get_by_flavor_id(env['server']['flavorId']),
152+
utils.import_object(FLAGS.image_service),
153+
env['server']['imageId'],
154+
self._get_network_topic(ctxt),
155+
name=env['server']['name'],
156+
description=env['server']['name'],
157+
key_name=key_pair['name'],
158+
key_data=key_pair['public_key'])
159+
return _entity_inst(instances[0])
159160

160161
def update(self, req, id):
161162
""" Updates the server name or password """
@@ -191,79 +192,6 @@ def action(self, req, id):
191192
return faults.Fault(exc.HTTPUnprocessableEntity())
192193
cloud.reboot(id)
193194

194-
def _build_server_instance(self, req, env):
195-
"""Build instance data structure and save it to the data store."""
196-
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
197-
inst = {}
198-
199-
user_id = req.environ['nova.context']['user']['id']
200-
ctxt = context.RequestContext(user_id, user_id)
201-
202-
flavor_id = env['server']['flavorId']
203-
204-
instance_type, flavor = [(k, v) for k, v in
205-
instance_types.INSTANCE_TYPES.iteritems()
206-
if v['flavorid'] == flavor_id][0]
207-
208-
image_id = env['server']['imageId']
209-
img_service = utils.import_object(FLAGS.image_service)
210-
211-
image = img_service.show(image_id)
212-
213-
if not image:
214-
raise Exception("Image not found")
215-
216-
inst['server_name'] = env['server']['name']
217-
inst['image_id'] = image_id
218-
inst['user_id'] = user_id
219-
inst['launch_time'] = ltime
220-
inst['mac_address'] = utils.generate_mac()
221-
inst['project_id'] = user_id
222-
223-
inst['state_description'] = 'scheduling'
224-
inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel)
225-
inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk)
226-
inst['reservation_id'] = utils.generate_uid('r')
227-
228-
inst['display_name'] = env['server']['name']
229-
inst['display_description'] = env['server']['name']
230-
231-
#TODO(dietz) this may be ill advised
232-
key_pair_ref = self.db_driver.key_pair_get_all_by_user(
233-
None, user_id)[0]
234-
235-
inst['key_data'] = key_pair_ref['public_key']
236-
inst['key_name'] = key_pair_ref['name']
237-
238-
#TODO(dietz) stolen from ec2 api, see TODO there
239-
inst['security_group'] = 'default'
240-
241-
# Flavor related attributes
242-
inst['instance_type'] = instance_type
243-
inst['memory_mb'] = flavor['memory_mb']
244-
inst['vcpus'] = flavor['vcpus']
245-
inst['local_gb'] = flavor['local_gb']
246-
inst['mac_address'] = utils.generate_mac()
247-
inst['launch_index'] = 0
248-
249-
ref = self.compute_manager.create_instance(ctxt, **inst)
250-
inst['id'] = ref['internal_id']
251-
252-
inst['hostname'] = str(ref['internal_id'])
253-
self.compute_manager.update_instance(ctxt, inst['id'], **inst)
254-
255-
address = self.network_manager.allocate_fixed_ip(ctxt,
256-
inst['id'])
257-
258-
# TODO(vish): This probably should be done in the scheduler
259-
# network is setup when host is assigned
260-
network_topic = self._get_network_topic(ctxt)
261-
rpc.call(ctxt,
262-
network_topic,
263-
{"method": "setup_fixed_ip",
264-
"args": {"address": address}})
265-
return inst
266-
267195
def _get_network_topic(self, context):
268196
"""Retrieves the network host for a project"""
269197
network_ref = self.network_manager.get_network(context)

nova/compute/instance_types.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,29 @@
2121
The built-in instance properties.
2222
"""
2323

24+
from nova import flags
25+
26+
FLAGS = flags.FLAGS
2427
INSTANCE_TYPES = {
2528
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
2629
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
2730
'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
2831
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
2932
'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
33+
34+
35+
def get_by_type(instance_type):
36+
"""Build instance data structure and save it to the data store."""
37+
if instance_type is None:
38+
return FLAGS.default_instance_type
39+
if instance_type not in INSTANCE_TYPES:
40+
raise exception.ApiError("Unknown instance type: %s",
41+
instance_type)
42+
return instance_type
43+
44+
45+
def get_by_flavor_id(flavor_id):
46+
for instance_type, details in INSTANCE_TYPES.iteritems():
47+
if details['flavorid'] == flavor_id:
48+
return instance_type
49+
return FLAGS.default_instance_type

0 commit comments

Comments
 (0)