From 91b35b22e753a4eb0c22c004bb12586970a95e11 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 4 May 2015 11:05:06 -0700 Subject: [PATCH] Drop compute RPC 3.x support This drops all the things we no longer need when not supporting the compute RPC 3.x interface. The major changes are removing of the run_instance() path and the mass of tests that hit that route. There are also quite a few tests that still depend on object_compat decorations on compute manager methods, which require some changes here. Change-Id: I60a89bf7884dbcb066c63101168b359013fe832b --- nova/compute/manager.py | 738 +------------------- nova/compute/rpcapi.py | 344 +++------ nova/tests/unit/compute/test_compute.py | 489 +++---------- nova/tests/unit/compute/test_compute_mgr.py | 148 +--- nova/tests/unit/compute/test_rpcapi.py | 563 --------------- nova/tests/unit/virt/xenapi/test_xenapi.py | 14 +- 6 files changed, 219 insertions(+), 2077 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e72be0174cc..174b14597aa 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -83,7 +83,6 @@ from nova import rpc from nova import safe_utils from nova.scheduler import client as scheduler_client -from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import configdrive @@ -453,22 +452,6 @@ def _load_instance(instance_or_dict): return decorated_function -# TODO(danms): Remove me after Icehouse -def aggregate_object_compat(function): - """Wraps a method that expects a new-world aggregate.""" - - @functools.wraps(function) - def decorated_function(self, context, *args, **kwargs): - aggregate = kwargs.get('aggregate') - if isinstance(aggregate, dict): - aggregate = objects.Aggregate._from_db_object( - context.elevated(), objects.Aggregate(), - aggregate) - kwargs['aggregate'] = aggregate - return function(self, context, *args, **kwargs) - return decorated_function - - class InstanceEvents(object): def __init__(self): self._events = {} @@ -660,7 +643,7 @@ def wait_for_instance_event(self, instance, event_names, deadline=300, class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" - target = messaging.Target(version='3.40') + target = messaging.Target(version='4.0') # How long to wait in seconds before re-issuing a shutdown # signal to a instance during power off. The overall @@ -685,7 +668,6 @@ def __init__(self, compute_driver=None, *args, **kwargs): openstack_driver.is_neutron_security_groups()) self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() - self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.scheduler_client = scheduler_client.SchedulerClient() self._resource_tracker_dict = {} self.instance_events = InstanceEvents() @@ -700,7 +682,6 @@ def __init__(self, compute_driver=None, *args, **kwargs): super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) - self.additional_endpoints.append(_ComputeV4Proxy(self)) # NOTE(russellb) Load the driver last. 
It may call back into the # compute manager via the virtapi, so we want it to be fully @@ -1086,7 +1067,7 @@ def _init_instance(self, context, instance): LOG.debug("Instance in transitional state %s at start-up " "retrying stop request", instance.task_state, instance=instance) - self.stop_instance(context, instance) + self.stop_instance(context, instance, True) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to stop instance') @@ -1420,67 +1401,6 @@ def _decode(f): return [_decode(f) for f in injected_files] - def _run_instance(self, context, request_spec, - filter_properties, requested_networks, injected_files, - admin_password, is_first_time, node, instance, - legacy_bdm_in_spec): - """Launch a new instance with specified options.""" - - extra_usage_info = {} - - def notify(status, msg="", fault=None, **kwargs): - """Send a create.{start,error,end} notification.""" - type_ = "create.%(status)s" % dict(status=status) - info = extra_usage_info.copy() - info['message'] = msg - self._notify_about_instance_usage(context, instance, type_, - extra_usage_info=info, fault=fault, **kwargs) - - try: - self._prebuild_instance(context, instance) - - if request_spec and request_spec.get('image'): - image_meta = request_spec['image'] - else: - image_meta = {} - - extra_usage_info = {"image_name": image_meta.get('name', '')} - - notify("start") # notify that build is starting - - instance, network_info = self._build_instance(context, - request_spec, filter_properties, requested_networks, - injected_files, admin_password, is_first_time, node, - instance, image_meta, legacy_bdm_in_spec) - notify("end", msg=_("Success"), network_info=network_info) - - except exception.RescheduledException as e: - # Instance build encountered an error, and has been rescheduled. - notify("error", fault=e) - - except exception.BuildAbortException as e: - # Instance build aborted due to a non-failure - LOG.info(e) - notify("end", msg=e.format_message()) # notify that build is done - - except Exception as e: - # Instance build encountered a non-recoverable error: - with excutils.save_and_reraise_exception(): - self._set_instance_error_state(context, instance) - notify("error", fault=e) # notify that build failed - - def _prebuild_instance(self, context, instance): - self._check_instance_exists(context, instance) - - try: - self._start_building(context, instance) - except (exception.InstanceNotFound, - exception.UnexpectedDeletingTaskStateError): - msg = _("Instance disappeared before we could start it") - # Quickly bail out of here - raise exception.BuildAbortException(instance_uuid=instance.uuid, - reason=msg) - def _validate_instance_group_policy(self, context, instance, filter_properties): # NOTE(russellb) Instance group policy is enforced by the scheduler. @@ -1518,188 +1438,10 @@ def _do_validation(context, instance, group_hint): _do_validation(context, instance, group_hint) - def _build_instance(self, context, request_spec, filter_properties, - requested_networks, injected_files, admin_password, is_first_time, - node, instance, image_meta, legacy_bdm_in_spec): - original_context = context - context = context.elevated() - - # NOTE(danms): This method is deprecated, but could be called, - # and if it is, it will have an old megatuple for requested_networks. 
- if requested_networks is not None: - requested_networks_obj = objects.NetworkRequestList( - objects=[objects.NetworkRequest.from_tuple(t) - for t in requested_networks]) - else: - requested_networks_obj = None - - # If neutron security groups pass requested security - # groups to allocate_for_instance() - if request_spec and self.is_neutron_security_groups: - security_groups = request_spec.get('security_group') - else: - security_groups = [] - - if node is None: - node = self.driver.get_available_nodes(refresh=True)[0] - LOG.debug("No node specified, defaulting to %s", node) - - network_info = None - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - # b64 decode the files to inject: - injected_files_orig = injected_files - injected_files = self._decode_files(injected_files) - - rt = self._get_resource_tracker(node) - try: - limits = filter_properties.get('limits', {}) - with rt.instance_claim(context, instance, limits) as inst_claim: - # NOTE(russellb) It's important that this validation be done - # *after* the resource tracker instance claim, as that is where - # the host is set on the instance. - self._validate_instance_group_policy(context, instance, - filter_properties) - macs = self.driver.macs_for_instance(instance) - dhcp_options = self.driver.dhcp_options_for_instance(instance) - - network_info = self._allocate_network(original_context, - instance, requested_networks_obj, macs, - security_groups, dhcp_options) - - # Verify that all the BDMs have a device_name set and assign a - # default to the ones missing it with the help of the driver. - self._default_block_device_names(context, instance, image_meta, - bdms) - - instance.vm_state = vm_states.BUILDING - instance.task_state = task_states.BLOCK_DEVICE_MAPPING - instance.numa_topology = inst_claim.claimed_numa_topology - instance.save() - - block_device_info = self._prep_block_device( - context, instance, bdms) - - set_access_ip = (is_first_time and - not instance.access_ip_v4 and - not instance.access_ip_v6) - - instance = self._spawn(context, instance, image_meta, - network_info, block_device_info, - injected_files, admin_password, - set_access_ip=set_access_ip) - except (exception.InstanceNotFound, - exception.UnexpectedDeletingTaskStateError): - # the instance got deleted during the spawn - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - try: - self._deallocate_network(context, instance) - except Exception: - msg = _LE('Failed to dealloc network ' - 'for deleted instance') - LOG.exception(msg, instance=instance) - raise exception.BuildAbortException( - instance_uuid=instance.uuid, - reason=_("Instance disappeared during build")) - except (exception.UnexpectedTaskStateError, - exception.VirtualInterfaceCreateException) as e: - # Don't try to reschedule, just log and reraise. 
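Aside: the build-failure paths deleted here all lean on oslo.utils' save_and_reraise_exception() so that network cleanup can run without masking the original spawn error. A minimal, self-contained sketch of that pattern follows; allocate_network() and deallocate_network() are placeholder names for illustration, not Nova APIs.

    from oslo_utils import excutils


    def allocate_network():
        raise RuntimeError('simulated spawn failure')


    def deallocate_network():
        print('cleaning up network allocation')


    def build():
        try:
            allocate_network()
        except Exception:
            # The original exception is re-raised when the with-block exits,
            # even though cleanup ran (and may itself have logged or failed)
            # in between.
            with excutils.save_and_reraise_exception():
                deallocate_network()
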
- with excutils.save_and_reraise_exception(): - LOG.debug(e.format_message(), instance=instance) - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - except exception.InvalidBDM: - with excutils.save_and_reraise_exception(): - if network_info is not None: - network_info.wait(do_raise=False) - try: - self._deallocate_network(context, instance) - except Exception: - msg = _LE('Failed to dealloc network ' - 'for failed instance') - LOG.exception(msg, instance=instance) - except Exception: - exc_info = sys.exc_info() - # try to re-schedule instance: - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - rescheduled = self._reschedule_or_error(original_context, instance, - exc_info, requested_networks, admin_password, - injected_files_orig, is_first_time, request_spec, - filter_properties, bdms, legacy_bdm_in_spec) - if rescheduled: - # log the original build error - self._log_original_error(exc_info, instance.uuid) - raise exception.RescheduledException( - instance_uuid=instance.uuid, - reason=six.text_type(exc_info[1])) - else: - # not re-scheduling, go to error: - raise exc_info[0], exc_info[1], exc_info[2] - - # spawn success - return instance, network_info - def _log_original_error(self, exc_info, instance_uuid): LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid, exc_info=exc_info) - def _reschedule_or_error(self, context, instance, exc_info, - requested_networks, admin_password, injected_files, is_first_time, - request_spec, filter_properties, bdms=None, - legacy_bdm_in_spec=True): - """Try to re-schedule the build or re-raise the original build error to - error out the instance. - """ - original_context = context - context = context.elevated() - - instance_uuid = instance.uuid - rescheduled = False - - compute_utils.add_instance_fault_from_exc(context, - instance, exc_info[1], exc_info=exc_info) - self._notify_about_instance_usage(context, instance, - 'instance.create.error', fault=exc_info[1]) - - try: - LOG.debug("Clean up resource before rescheduling.", - instance=instance) - if bdms is None: - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - self._shutdown_instance(context, instance, - bdms, requested_networks) - self._cleanup_volumes(context, instance.uuid, bdms) - except Exception: - # do not attempt retry if clean up failed: - with excutils.save_and_reraise_exception(): - self._log_original_error(exc_info, instance_uuid) - - try: - method_args = (request_spec, admin_password, injected_files, - requested_networks, is_first_time, filter_properties, - legacy_bdm_in_spec) - task_state = task_states.SCHEDULING - - rescheduled = self._reschedule(original_context, request_spec, - filter_properties, instance, - self.scheduler_rpcapi.run_instance, method_args, - task_state, exc_info) - - except Exception: - rescheduled = False - LOG.exception(_LE("Error trying to reschedule"), - instance_uuid=instance_uuid) - - return rescheduled - def _reschedule(self, context, request_spec, filter_properties, instance, reschedule_method, method_args, task_state, exc_info=None): @@ -1757,16 +1499,6 @@ def _check_instance_exists(self, context, instance): if self.driver.instance_exists(instance): raise exception.InstanceExists(name=instance.name) - def _start_building(self, context, instance): - """Save the host and launched_on fields and log appropriately.""" - LOG.info(_LI('Starting instance...'), context=context, - instance=instance) - 
self._instance_update(context, instance.uuid, - vm_state=vm_states.BUILDING, - task_state=None, - expected_task_state=(task_states.SCHEDULING, - None)) - def _allocate_network_async(self, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options): """Method used to allocate networks in the background. @@ -2152,8 +1884,6 @@ def _get_instance_block_device_info(self, context, instance, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} - # NOTE(mikal): No object_compat wrapper on this method because its - # callers all pass objects already @wrap_exception() @reverts_task_state @wrap_instance_fault @@ -2163,28 +1893,6 @@ def build_and_run_instance(self, context, instance, image, request_spec, security_groups=None, block_device_mapping=None, node=None, limits=None): - # NOTE(danms): Remove this in v4.0 of the RPC API - if (requested_networks and - not isinstance(requested_networks, - objects.NetworkRequestList)): - requested_networks = objects.NetworkRequestList( - objects=[objects.NetworkRequest.from_tuple(t) - for t in requested_networks]) - # NOTE(melwitt): Remove this in v4.0 of the RPC API - flavor = filter_properties.get('instance_type') - if flavor and not isinstance(flavor, objects.Flavor): - # Code downstream may expect extra_specs to be populated since it - # is receiving an object, so lookup the flavor to ensure this. - flavor = objects.Flavor.get_by_id(context, flavor['id']) - filter_properties = dict(filter_properties, instance_type=flavor) - - # NOTE(sahid): Remove this in v4.0 of the RPC API - if (limits and 'numa_topology' in limits and - isinstance(limits['numa_topology'], six.string_types)): - db_obj = jsonutils.loads(limits['numa_topology']) - limits['numa_topology'] = ( - objects.NUMATopologyLimits.obj_from_db_obj(db_obj)) - @utils.synchronized(instance.uuid) def _locked_do_build_and_run_instance(*args, **kwargs): # NOTE(danms): We grab the semaphore with the instance uuid @@ -2513,34 +2221,6 @@ def _cleanup_allocated_networks(self, context, instance, # exception will be raised by instance.save() pass - @object_compat - @messaging.expected_exceptions(exception.BuildAbortException, - exception.UnexpectedTaskStateError, - exception.VirtualInterfaceCreateException, - exception.RescheduledException) - @wrap_exception() - @reverts_task_state - @wrap_instance_event - @wrap_instance_fault - def run_instance(self, context, instance, request_spec, - filter_properties, requested_networks, - injected_files, admin_password, - is_first_time, node, legacy_bdm_in_spec): - # NOTE(alaski) This method should be deprecated when the scheduler and - # compute rpc interfaces are bumped to 4.x, and slated for removal in - # 5.x as it is no longer used. 
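Aside: both the surviving build_and_run_instance() and the do_run_instance() body being deleted here serialize work per instance with @utils.synchronized(instance.uuid). nova.utils.synchronized is, roughly, oslo.concurrency's lockutils.synchronized with a 'nova-' lock prefix; a stripped-down sketch using lockutils directly, with 'fake-uuid' as a stand-in key:

    from oslo_concurrency import lockutils


    def do_build(instance_uuid):
        @lockutils.synchronized(instance_uuid)
        def _locked_build():
            # Runs with the per-uuid semaphore held, so a concurrent stop or
            # delete keyed on the same uuid waits until this block finishes.
            print('building %s' % instance_uuid)

        _locked_build()


    do_build('fake-uuid')
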
- - if filter_properties is None: - filter_properties = {} - - @utils.synchronized(instance.uuid) - def do_run_instance(): - self._run_instance(context, request_spec, - filter_properties, requested_networks, injected_files, - admin_password, is_first_time, node, instance, - legacy_bdm_in_spec) - do_run_instance() - def _try_deallocate_network(self, context, instance, requested_networks=None): try: @@ -2735,16 +2415,6 @@ def _delete_instance(self, context, instance, bdms, quotas): @wrap_instance_fault def terminate_instance(self, context, instance, bdms, reservations): """Terminate an instance on this host.""" - # NOTE (ndipanov): If we get non-object BDMs, just get them from the - # db again, as this means they are sent in the old format and we want - # to avoid converting them back when we can just get them. - # Remove this when we bump the RPC major version to 4.0 - if (bdms and - any(not isinstance(bdm, obj_base.NovaObject) - for bdm in bdms)): - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) @@ -2773,7 +2443,7 @@ def do_terminate_instance(instance, bdms): @reverts_task_state @wrap_instance_event @wrap_instance_fault - def stop_instance(self, context, instance, clean_shutdown=True): + def stop_instance(self, context, instance, clean_shutdown): """Stopping an instance on this host.""" @utils.synchronized(instance.uuid) @@ -2869,7 +2539,6 @@ def soft_delete_instance(self, context, instance, reservations): quotas.commit() self._notify_about_instance_usage(context, instance, "soft_delete.end") - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -2919,7 +2588,6 @@ def _rebuild_default_impl(self, context, instance, image_meta, admin_password, network_info=network_info, block_device_info=new_block_device_info) - @object_compat @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported) @wrap_exception() @reverts_task_state @@ -2950,14 +2618,6 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref, partition must be preserved on rebuild """ context = context.elevated() - # NOTE (ndipanov): If we get non-object BDMs, just get them from the - # db again, as this means they are sent in the old format and we want - # to avoid converting them back when we can just get them. 
- # Remove this on the next major RPC version bump - if (bdms and - any(not isinstance(bdm, obj_base.NovaObject) - for bdm in bdms)): - bdms = None orig_vm_state = instance.vm_state with self._error_out_instance_on_exception(context, instance): @@ -3081,7 +2741,7 @@ def detach_block_devices(context, bdms): instance.task_state = task_states.POWERING_OFF instance.progress = 0 instance.save() - self.stop_instance(context, instance) + self.stop_instance(context, instance, False) self._update_scheduler_instance_info(context, instance) self._notify_about_instance_usage( context, instance, "rebuild.end", @@ -3321,7 +2981,6 @@ def update_task_state(task_state, def _post_interrupted_snapshot_cleanup(self, context, instance): self.driver.post_interrupted_snapshot_cleanup(context, instance) - @object_compat @messaging.expected_exceptions(NotImplementedError) @wrap_exception() def volume_snapshot_create(self, context, instance, volume_id, @@ -3329,7 +2988,6 @@ def volume_snapshot_create(self, context, instance, volume_id, self.driver.volume_snapshot_create(context, instance, volume_id, create_info) - @object_compat @messaging.expected_exceptions(NotImplementedError) @wrap_exception() def volume_snapshot_delete(self, context, instance, volume_id, @@ -3375,7 +3033,6 @@ def _rotate_backups(self, context, instance, backup_type, rotation): instance=instance) self.image_api.delete(context, image_id) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3486,13 +3143,12 @@ def _get_rescue_image(self, context, instance, rescue_image_ref=None): image_meta['id'] = rescue_image_ref return image_meta - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def rescue_instance(self, context, instance, rescue_password, - rescue_image_ref=None, clean_shutdown=True): + rescue_image_ref, clean_shutdown): context = context.elevated() LOG.info(_LI('Rescuing'), context=context, instance=instance) @@ -3536,7 +3192,6 @@ def rescue_instance(self, context, instance, rescue_password, "rescue.end", extra_usage_info=extra_usage_info, network_info=network_info) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @@ -3562,7 +3217,6 @@ def unrescue_instance(self, context, instance): "unrescue.end", network_info=network_info) - @object_compat @wrap_exception() @wrap_instance_fault def change_instance_metadata(self, context, diff, instance): @@ -3842,7 +3496,8 @@ def finish_revert_resize(self, context, instance, reservations, migration): else: instance.task_state = task_states.POWERING_OFF instance.save() - self.stop_instance(context, instance=instance) + self.stop_instance(context, instance=instance, + clean_shutdown=True) self._notify_about_instance_usage( context, instance, "resize.revert.end") @@ -3898,7 +3553,7 @@ def _prep_resize(self, context, image, instance, instance_type, @wrap_instance_fault def prep_resize(self, context, image, instance, instance_type, reservations, request_spec, filter_properties, node, - clean_shutdown=True): + clean_shutdown): """Initiates the process of moving a running instance to another host. Possibly changes the RAM and disk size in the process. 
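Aside: the guards removed from terminate_instance() and rebuild_instance() above are the standard check for pre-object clients that sent BDMs as plain dicts; with only 4.0 callers left, the object serializer guarantees real objects, so the idiom can go. For reference, the shape of what is being deleted, written here as a self-contained helper mirroring the removed lines:

    from nova import objects
    from nova.objects import base as obj_base


    def normalize_bdms(context, instance, bdms):
        # Pre-4.0 clients could send plain dicts; rather than convert those
        # back into objects, reload the authoritative rows from the database.
        if bdms and any(not isinstance(bdm, obj_base.NovaObject)
                        for bdm in bdms):
            return objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        return bdms
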
@@ -3993,7 +3648,7 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info, @wrap_instance_fault def resize_instance(self, context, instance, image, reservations, migration, instance_type, - clean_shutdown=True): + clean_shutdown): """Starts the migration of a running instance to another host.""" quotas = objects.Quotas.from_reservations(context, @@ -4175,7 +3830,6 @@ def finish_resize(self, context, disk_info, image, instance, instance=instance) self._set_instance_error_state(context, instance) - @object_compat @wrap_exception() @wrap_instance_fault def add_fixed_ip_to_instance(self, context, network_id, instance): @@ -4199,7 +3853,6 @@ def add_fixed_ip_to_instance(self, context, network_id, instance): self._notify_about_instance_usage( context, instance, "create_ip.end", network_info=network_info) - @object_compat @wrap_exception() @wrap_instance_fault def remove_fixed_ip_from_instance(self, context, address, instance): @@ -4277,7 +3930,6 @@ def get_host_uptime(self, context): """Returns the result of calling "uptime" on the target host.""" return self.driver.get_host_uptime() - @object_compat @wrap_exception() @wrap_instance_fault def get_diagnostics(self, context, instance): @@ -4367,7 +4019,7 @@ def resume_instance(self, context, instance): @wrap_instance_event @wrap_instance_fault def shelve_instance(self, context, instance, image_id, - clean_shutdown=True): + clean_shutdown): """Shelve an instance. This should be used when you want to take a snapshot of the instance. @@ -4419,7 +4071,7 @@ def update_task_state(task_state, expected_state=task_states.SHELVING): @wrap_exception() @reverts_task_state @wrap_instance_fault - def shelve_offload_instance(self, context, instance, clean_shutdown=True): + def shelve_offload_instance(self, context, instance, clean_shutdown): """Remove a shelved instance from the hypervisor. This frees up those resources for use by other instances, but may lead @@ -4461,7 +4113,7 @@ def shelve_offload_instance(self, context, instance, clean_shutdown=True): @wrap_instance_event @wrap_instance_fault def unshelve_instance(self, context, instance, image, - filter_properties=None, node=None): + filter_properties, node): """Unshelve the instance. 
:param context: request context @@ -4563,7 +4215,6 @@ def inject_network_info(self, context, instance): network_info = self.network_api.get_instance_nw_info(context, instance) self._inject_network_info(context, instance, network_info) - @object_compat @messaging.expected_exceptions(NotImplementedError, exception.InstanceNotFound) @wrap_exception() @@ -4591,7 +4242,6 @@ def _tail_log(self, log, length): else: return '\n'.join(log.split('\n')[-int(length):]) - @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, @@ -4629,7 +4279,6 @@ def get_vnc_console(self, context, console_type, instance): return connect_info - @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, @@ -4666,7 +4315,6 @@ def get_spice_console(self, context, console_type, instance): return connect_info - @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, @@ -4740,7 +4388,6 @@ def get_serial_console(self, context, console_type, instance): @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound) - @object_compat @wrap_exception() @wrap_instance_fault def validate_console_port(self, ctxt, instance, port, console_type): @@ -4755,17 +4402,11 @@ def validate_console_port(self, ctxt, instance, port, console_type): return console_info.port == port - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def reserve_block_device_name(self, context, instance, device, - volume_id, disk_bus=None, device_type=None, - return_bdm_object=False): - # NOTE(ndipanov): disk_bus and device_type will be set to None if not - # passed (by older clients) and defaulted by the virt driver. Remove - # default values on the next major RPC version bump. - + volume_id, disk_bus, device_type): @utils.synchronized(instance.uuid) def do_reserve(): bdms = ( @@ -4785,23 +4426,15 @@ def do_reserve(): disk_bus=disk_bus, device_type=device_type) bdm.create() - if return_bdm_object: - return bdm - else: - return device_name + return bdm return do_reserve() - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault - def attach_volume(self, context, volume_id, mountpoint, - instance, bdm=None): + def attach_volume(self, context, instance, bdm): """Attach a volume to an instance.""" - if not bdm: - bdm = objects.BlockDeviceMapping.get_by_volume_id( - context, volume_id) driver_bdm = driver_block_device.convert_volume(bdm) @utils.synchronized(instance.uuid) @@ -4875,7 +4508,6 @@ def _detach_volume(self, context, instance, bdm): context=context, instance=instance) self.volume_api.roll_detaching(context, volume_id) - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault @@ -5017,14 +4649,6 @@ def remove_volume_connection(self, context, volume_id, instance): # detached, or delete the bdm, just remove the # connection from this host. 
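Aside: with 3.x gone, reserve_block_device_name() above always returns the BlockDeviceMapping object (the return_bdm_object flag and the device-name return value disappear), and the manager-side attach_volume() consumes that object directly. Roughly how a 4.0-aware caller chains the two RPCs; this is a simplified sketch, not a verbatim copy of nova/compute/api.py, and the attach() wrapper is hypothetical:

    def attach(compute_rpcapi, context, instance, volume_id, device,
               disk_bus=None, device_type=None):
        # At 4.0 the manager always hands back the BDM object.
        bdm = compute_rpcapi.reserve_block_device_name(
            context, instance, device, volume_id,
            disk_bus=disk_bus, device_type=device_type)
        # The manager's attach_volume() now takes just (context, instance,
        # bdm); the volume id and mountpoint are read from the BDM itself.
        compute_rpcapi.attach_volume(context, instance, volume_id,
                                     bdm.device_name, bdm=bdm)
        return bdm.device_name
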
- # NOTE(PhilDay): Can't use object_compat decorator here as - # instance is not the second parameter - if isinstance(instance, dict): - metas = ['metadata', 'system_metadata'] - instance = objects.Instance._from_db_object( - context, objects.Instance(), instance, - expected_attrs=metas) - instance._context = context try: bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) @@ -5034,7 +4658,6 @@ def remove_volume_connection(self, context, volume_id, instance): except exception.NotFound: pass - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault @@ -5071,7 +4694,6 @@ def attach_interface(self, context, instance, network_id, port_id, return network_info[0] - @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault @@ -5179,7 +4801,6 @@ def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): dest_check_data, block_device_info) - @object_compat @wrap_exception() @wrap_instance_fault def pre_live_migration(self, context, instance, block_migration, disk, @@ -5242,16 +4863,6 @@ def live_migration(self, context, dest, instance, block_migration, """ - # NOTE(danms): since instance is not the first parameter, we can't - # use @object_compat on this method. Since this is the only example, - # we do this manually instead of complicating the decorator - if not isinstance(instance, obj_base.NovaObject): - expected = ['metadata', 'system_metadata', - 'security_groups', 'info_cache'] - instance = objects.Instance._from_db_object( - context, objects.Instance(), instance, - expected_attrs=expected) - # Create a local copy since we'll be modifying the dictionary migrate_data = dict(migrate_data or {}) try: @@ -5439,7 +5050,6 @@ def _clean_instance_console_tokens(self, ctxt, instance): self.consoleauth_rpcapi.delete_tokens_for_instance( ctxt, instance.uuid) - @object_compat @wrap_exception() @wrap_instance_fault def post_live_migration_at_destination(self, context, instance, @@ -5544,12 +5154,11 @@ def _rollback_live_migration(self, context, instance, self._notify_about_instance_usage(context, instance, "live_migration._rollback.end") - @object_compat @wrap_exception() @wrap_instance_fault def rollback_live_migration_at_destination(self, context, instance, - destroy_disks=True, - migrate_data=None): + destroy_disks, + migrate_data): """Cleaning up image directory that is created pre_live_migration. 
:param context: security context @@ -6469,7 +6078,6 @@ def _error_out_instance_on_exception(self, context, instance, quotas.rollback() self._set_instance_error_state(context, instance) - @aggregate_object_compat @wrap_exception() def add_aggregate_host(self, context, aggregate, host, slave_info): """Notify hypervisor of change (for hypervisor pools).""" @@ -6486,7 +6094,6 @@ def add_aggregate_host(self, context, aggregate, host, slave_info): aggregate.delete_host, aggregate, host) - @aggregate_object_compat @wrap_exception() def remove_aggregate_host(self, context, host, slave_info, aggregate): """Removes a host from a physical hypervisor pool.""" @@ -6631,314 +6238,3 @@ def unquiesce_instance(self, context, instance, mapping=None): image_meta = compute_utils.get_image_metadata( context, self.image_api, image_ref, instance) self.driver.unquiesce(context, instance, image_meta) - - -# TODO(danms): This goes away immediately in Lemming and is just -# present in Kilo so that we can receive v3.x and v4.0 messages -class _ComputeV4Proxy(object): - - target = messaging.Target(version='4.0') - - def __init__(self, manager): - self.manager = manager - - def add_aggregate_host(self, ctxt, aggregate, host, slave_info=None): - return self.manager.add_aggregate_host(ctxt, aggregate, host, - slave_info=slave_info) - - def add_fixed_ip_to_instance(self, ctxt, network_id, instance): - return self.manager.add_fixed_ip_to_instance(ctxt, - network_id, - instance) - - def attach_interface(self, ctxt, instance, network_id, port_id, - requested_ip): - return self.manager.attach_interface(ctxt, instance, network_id, - port_id, requested_ip) - - def attach_volume(self, ctxt, instance, bdm): - # NOTE(danms): In 3.x, attach_volume had mountpoint and volume_id - # parameters, which are gone from 4.x. Provide None for each to - # the 3.x manager above and remove in Lemming. - return self.manager.attach_volume(ctxt, None, None, - instance=instance, - bdm=bdm) - - def change_instance_metadata(self, ctxt, instance, diff): - return self.manager.change_instance_metadata( - ctxt, diff=diff, instance=instance) - - def check_can_live_migrate_destination(self, ctxt, instance, - block_migration, disk_over_commit): - return self.manager.check_can_live_migrate_destination( - ctxt, instance, block_migration, disk_over_commit) - - def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): - return self.manager.check_can_live_migrate_source(ctxt, instance, - dest_check_data) - - def check_instance_shared_storage(self, ctxt, instance, data): - return self.manager.check_instance_shared_storage(ctxt, instance, data) - - def confirm_resize(self, ctxt, instance, reservations, migration): - return self.manager.confirm_resize(ctxt, instance, - reservations, migration) - - def detach_interface(self, ctxt, instance, port_id): - return self.manager.detach_interface(ctxt, instance, port_id) - - def detach_volume(self, ctxt, volume_id, instance): - # NOTE(danms): Pass instance by kwarg to help the object_compat - # decorator, as real RPC dispatch does. 
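Aside: the _ComputeV4Proxy removed below was registered through self.additional_endpoints so one RPC server could dispatch both 3.x and 4.0 calls during the Kilo cycle. A minimal illustration of that multi-endpoint pattern with bare oslo.messaging; the topic, server name and 'fake://' transport URL are placeholders, and the methods are trimmed to one example:

    import oslo_messaging as messaging
    from oslo_config import cfg


    class ManagerV3(object):
        target = messaging.Target(version='3.40')

        def stop_instance(self, ctxt, instance, clean_shutdown=True):
            print('v3 stop, clean_shutdown=%s' % clean_shutdown)


    class ProxyV4(object):
        target = messaging.Target(version='4.0')

        def __init__(self, manager):
            self.manager = manager

        def stop_instance(self, ctxt, instance, clean_shutdown):
            # 4.0 makes clean_shutdown mandatory, then delegates to 3.x code.
            return self.manager.stop_instance(ctxt, instance, clean_shutdown)


    manager = ManagerV3()
    transport = messaging.get_transport(cfg.CONF, url='fake://')
    target = messaging.Target(topic='compute', server='host1')
    server = messaging.get_rpc_server(
        transport, target, [manager, ProxyV4(manager)], executor='blocking')
    # server.start() would begin dispatching: 3.x requests match ManagerV3's
    # target, 4.0 requests match ProxyV4's.
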
- return self.manager.detach_volume(ctxt, volume_id, instance=instance) - - def finish_resize(self, ctxt, disk_info, image, instance, - reservations, migration): - return self.manager.finish_resize(ctxt, disk_info, image, instance, - reservations, migration) - - def finish_revert_resize(self, ctxt, instance, - reservations, migration): - return self.manager.finish_revert_resize(ctxt, instance, - reservations, migration) - - def get_console_output(self, ctxt, instance, tail_length): - return self.manager.get_console_output(ctxt, instance, tail_length) - - def get_console_pool_info(self, ctxt, console_type): - return self.manager.get_console_pool_info(ctxt, console_type) - - def get_console_topic(self, ctxt): - return self.manager.get_console_topic(ctxt) - - def get_diagnostics(self, ctxt, instance): - return self.manager.get_diagnostics(ctxt, instance) - - def get_instance_diagnostics(self, ctxt, instance): - return self.manager.get_instance_diagnostics(ctxt, instance) - - def get_vnc_console(self, ctxt, console_type, instance): - return self.manager.get_vnc_console(ctxt, console_type, instance) - - def get_spice_console(self, ctxt, console_type, instance): - return self.manager.get_spice_console(ctxt, console_type, instance) - - def get_rdp_console(self, ctxt, console_type, instance): - return self.manager.get_rdp_console(ctxt, console_type, instance) - - def get_serial_console(self, ctxt, console_type, instance): - return self.manager.get_serial_console(ctxt, console_type, instance) - - def validate_console_port(self, ctxt, instance, port, console_type): - return self.manager.validate_console_port(ctxt, instance, port, - console_type) - - def host_maintenance_mode(self, ctxt, host, mode): - return self.manager.host_maintenance_mode(ctxt, host, mode) - - def host_power_action(self, ctxt, action): - return self.manager.host_power_action(ctxt, action) - - def inject_network_info(self, ctxt, instance): - return self.manager.inject_network_info(ctxt, instance) - - def live_migration(self, ctxt, dest, instance, block_migration, - migrate_data=None): - return self.manager.live_migration(ctxt, dest, instance, - block_migration, - migrate_data=migrate_data) - - def pause_instance(self, ctxt, instance): - return self.manager.pause_instance(ctxt, instance) - - def post_live_migration_at_destination(self, ctxt, instance, - block_migration): - return self.manager.post_live_migration_at_destination( - ctxt, instance, block_migration) - - def pre_live_migration(self, ctxt, instance, block_migration, disk, - migrate_data=None): - return self.manager.pre_live_migration(ctxt, instance, block_migration, - disk, migrate_data=migrate_data) - - def prep_resize(self, ctxt, image, instance, instance_type, - reservations=None, request_spec=None, - filter_properties=None, node=None, clean_shutdown=True): - return self.manager.prep_resize(ctxt, image, instance, instance_type, - reservations=reservations, - request_spec=request_spec, - filter_properties=filter_properties, - node=node, - clean_shutdown=clean_shutdown) - - def reboot_instance(self, ctxt, instance, block_device_info, reboot_type): - return self.manager.reboot_instance(ctxt, instance, block_device_info, - reboot_type) - - def rebuild_instance(self, ctxt, instance, orig_image_ref, image_ref, - injected_files, new_pass, orig_sys_metadata, - bdms, recreate, on_shared_storage, - preserve_ephemeral=False): - return self.manager.rebuild_instance( - ctxt, instance, orig_image_ref, image_ref, - injected_files, new_pass, orig_sys_metadata, - bdms, recreate, 
on_shared_storage, - preserve_ephemeral=preserve_ephemeral) - - def refresh_security_group_rules(self, ctxt, security_group_id): - return self.manager.refresh_security_group_rules(ctxt, - security_group_id) - - def refresh_security_group_members(self, ctxt, security_group_id): - return self.manager.refresh_security_group_members(ctxt, - security_group_id) - - def refresh_instance_security_rules(self, ctxt, instance): - return self.manager.refresh_instance_security_rules(ctxt, instance) - - def refresh_provider_fw_rules(self, ctxt): - return self.manager.refresh_provider_fw_rules(ctxt) - - def remove_aggregate_host(self, ctxt, host, slave_info, aggregate): - return self.manager.remove_aggregate_host(ctxt, - host, slave_info, - aggregate) - - def remove_fixed_ip_from_instance(self, ctxt, address, instance): - return self.manager.remove_fixed_ip_from_instance(ctxt, address, - instance) - - def remove_volume_connection(self, ctxt, instance, volume_id): - return self.manager.remove_volume_connection(ctxt, instance, volume_id) - - def rescue_instance(self, ctxt, instance, rescue_password, - rescue_image_ref, clean_shutdown): - return self.manager.rescue_instance(ctxt, instance, rescue_password, - rescue_image_ref=rescue_image_ref, - clean_shutdown=clean_shutdown) - - def reset_network(self, ctxt, instance): - return self.manager.reset_network(ctxt, instance) - - def resize_instance(self, ctxt, instance, image, - reservations, migration, instance_type, - clean_shutdown=True): - return self.manager.resize_instance(ctxt, instance, image, - reservations, migration, - instance_type, - clean_shutdown=clean_shutdown) - - def resume_instance(self, ctxt, instance): - return self.manager.resume_instance(ctxt, instance) - - def revert_resize(self, ctxt, instance, migration, reservations=None): - return self.manager.revert_resize(ctxt, instance, migration, - reservations=reservations) - - def rollback_live_migration_at_destination(self, ctxt, instance, - destroy_disks, - migrate_data): - return self.manager.rollback_live_migration_at_destination( - ctxt, instance, destroy_disks=destroy_disks, - migrate_data=migrate_data) - - def set_admin_password(self, ctxt, instance, new_pass): - return self.manager.set_admin_password(ctxt, instance, new_pass) - - def set_host_enabled(self, ctxt, enabled): - return self.manager.set_host_enabled(ctxt, enabled) - - def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id): - return self.manager.swap_volume(ctxt, old_volume_id, new_volume_id, - instance) - - def get_host_uptime(self, ctxt): - return self.manager.get_host_uptime(ctxt) - - def reserve_block_device_name(self, ctxt, instance, device, volume_id, - disk_bus=None, device_type=None): - return self.manager.reserve_block_device_name(ctxt, instance, device, - volume_id, - disk_bus=disk_bus, - device_type=device_type, - return_bdm_object=True) - - def backup_instance(self, ctxt, image_id, instance, backup_type, - rotation): - return self.manager.backup_instance(ctxt, image_id, instance, - backup_type, rotation) - - def snapshot_instance(self, ctxt, image_id, instance): - return self.manager.snapshot_instance(ctxt, image_id, instance) - - def start_instance(self, ctxt, instance): - return self.manager.start_instance(ctxt, instance) - - def stop_instance(self, ctxt, instance, clean_shutdown): - return self.manager.stop_instance(ctxt, instance, clean_shutdown) - - def suspend_instance(self, ctxt, instance): - return self.manager.suspend_instance(ctxt, instance) - - def terminate_instance(self, ctxt, instance, 
bdms, reservations=None): - return self.manager.terminate_instance(ctxt, instance, bdms, - reservations=reservations) - - def unpause_instance(self, ctxt, instance): - return self.manager.unpause_instance(ctxt, instance) - - def unrescue_instance(self, ctxt, instance): - return self.manager.unrescue_instance(ctxt, instance) - - def soft_delete_instance(self, ctxt, instance, reservations): - return self.manager.soft_delete_instance(ctxt, instance, reservations) - - def restore_instance(self, ctxt, instance): - return self.manager.restore_instance(ctxt, instance) - - def shelve_instance(self, ctxt, instance, image_id=None, - clean_shutdown=True): - return self.manager.shelve_instance(ctxt, instance, image_id=image_id, - clean_shutdown=clean_shutdown) - - def shelve_offload_instance(self, ctxt, instance, clean_shutdown): - return self.manager.shelve_offload_instance(ctxt, instance, - clean_shutdown) - - def unshelve_instance(self, ctxt, instance, image=None, - filter_properties=None, node=None): - return self.manager.unshelve_instance( - ctxt, instance, image=image, - filter_properties=filter_properties, - node=node) - - def volume_snapshot_create(self, ctxt, instance, volume_id, create_info): - return self.manager.volume_snapshot_create(ctxt, instance, volume_id, - create_info) - - def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, - delete_info): - return self.manager.volume_snapshot_delete(ctxt, instance, volume_id, - snapshot_id, delete_info) - - def external_instance_event(self, ctxt, instances, events): - return self.manager.external_instance_event(ctxt, instances, events) - - def build_and_run_instance(self, ctxt, instance, image, request_spec, - filter_properties, admin_password=None, - injected_files=None, requested_networks=None, - security_groups=None, block_device_mapping=None, - node=None, limits=None): - return self.manager.build_and_run_instance( - ctxt, instance, image, request_spec, filter_properties, - admin_password=admin_password, injected_files=injected_files, - requested_networks=requested_networks, - security_groups=security_groups, - block_device_mapping=block_device_mapping, - node=node, limits=limits) - - def quiesce_instance(self, ctxt, instance): - return self.manager.quiesce_instance(ctxt, instance) - - def unquiesce_instance(self, ctxt, instance, mapping=None): - return self.manager.unquiesce_instance(ctxt, instance, mapping=mapping) diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index acc2a1ffc8a..b2583bb899e 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -22,11 +22,10 @@ from oslo_serialization import jsonutils from nova import exception -from nova.i18n import _, _LW +from nova.i18n import _ from nova import objects from nova.objects import base as objects_base from nova import rpc -from nova import utils rpcapi_opts = [ cfg.StrOpt('compute_topic', @@ -336,14 +335,14 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host, parameter for the remote method. :param host: This is the host to send the message to. 
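Aside: even with every rpcapi method below hard-coded to version='4.0', the client object still honours an operator-set cap, which is what keeps rolling upgrades possible. A stripped-down sketch of how such a client is built with oslo.messaging; the topic, cap value and 'fake://' URL are placeholders, and Nova's real ComputeAPI derives the cap from the [upgrade_levels] compute option rather than a literal:

    import oslo_messaging as messaging
    from oslo_config import cfg

    transport = messaging.get_transport(cfg.CONF, url='fake://')
    target = messaging.Target(topic='compute', version='4.0')
    client = messaging.RPCClient(transport, target, version_cap='4.0')

    # prepare() narrows the target per call; if version_cap were lower than
    # the requested version, the subsequent cast/call would raise
    # oslo_messaging.RPCVersionCapError.
    cctxt = client.prepare(server='host1', version='4.0')
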
''' - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, slave_info=slave_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): - version = self._compat_ver('4.0', '3.12') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'add_fixed_ip_to_instance', @@ -351,7 +350,7 @@ def add_fixed_ip_to_instance(self, ctxt, instance, network_id): def attach_interface(self, ctxt, instance, network_id, port_id, requested_ip): - version = self._compat_ver('4.0', '3.17') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'attach_interface', @@ -360,57 +359,21 @@ def attach_interface(self, ctxt, instance, network_id, port_id, def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None): kw = {'instance': instance, 'bdm': bdm} - if self.client.can_send_version('4.0'): - version = '4.0' - else: - version = '3.16' - kw['mountpoint'] = mountpoint - kw['volume_id'] = volume_id + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'attach_volume', **kw) def change_instance_metadata(self, ctxt, instance, diff): - version = self._compat_ver('4.0', '3.7') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'change_instance_metadata', instance=instance, diff=diff) - def _warn_buggy_live_migrations(self, data=None): - # NOTE(danms): We know that libvirt live migration with shared block - # storage was buggy (potential loss of data) before version 3.32. - # Since we need to support live migration with older clients, we need - # to warn the operator of this possibility. The logic below tries to - # decide if a warning should be emitted, assuming the positive if - # not sure. This can be removed when we bump to RPC API version 4.0. - if data: - if data.get('is_shared_block_storage') is not False: - # Shared block storage, or unknown - should_warn = True - else: - # Specifically not shared block storage - should_warn = False - else: - # Unknown, so warn to be safe - should_warn = True - - if should_warn: - LOG.warning(_LW('Live migration with clients before RPC version ' - '3.32 is known to be buggy with shared block ' - 'storage. 
See ' - 'https://bugs.launchpad.net/nova/+bug/1250751 for ' - 'more information!')) - def check_can_live_migrate_destination(self, ctxt, instance, destination, block_migration, disk_over_commit): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.32'): - version = '3.32' - else: - version = '3.0' - self._warn_buggy_live_migrations() + version = '4.0' cctxt = self.client.prepare(server=destination, version=version) return cctxt.call(ctxt, 'check_can_live_migrate_destination', instance=instance, @@ -418,13 +381,7 @@ def check_can_live_migrate_destination(self, ctxt, instance, destination, disk_over_commit=disk_over_commit) def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.32'): - version = '3.32' - else: - version = '3.0' - self._warn_buggy_live_migrations() + version = '4.0' source = _compute_host(None, instance) cctxt = self.client.prepare(server=source, version=version) return cctxt.call(ctxt, 'check_can_live_migrate_source', @@ -432,13 +389,7 @@ def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): dest_check_data=dest_check_data) def check_instance_shared_storage(self, ctxt, instance, data, host=None): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.29'): - version = '3.29' - else: - version = '3.0' - instance = jsonutils.to_primitive(instance) + version = '4.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) return cctxt.call(ctxt, 'check_instance_shared_storage', @@ -447,7 +398,7 @@ def check_instance_shared_storage(self, ctxt, instance, data, host=None): def confirm_resize(self, ctxt, instance, migration, host, reservations=None, cast=True): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) rpc_method = cctxt.cast if cast else cctxt.call @@ -456,20 +407,14 @@ def confirm_resize(self, ctxt, instance, migration, host, reservations=reservations) def detach_interface(self, ctxt, instance, port_id): - version = self._compat_ver('4.0', '3.17') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_interface', instance=instance, port_id=port_id) def detach_volume(self, ctxt, instance, volume_id): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.25'): - version = '3.25' - else: - version = '3.0' - instance = jsonutils.to_primitive(instance) + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_volume', @@ -477,7 +422,7 @@ def detach_volume(self, ctxt, instance, volume_id): def finish_resize(self, ctxt, instance, migration, image, disk_info, host, reservations=None): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'finish_resize', instance=instance, migration=migration, @@ -485,80 +430,75 @@ def finish_resize(self, ctxt, instance, migration, image, disk_info, def finish_revert_resize(self, ctxt, instance, migration, host, reservations=None): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'finish_revert_resize', instance=instance, migration=migration, 
reservations=reservations) def get_console_output(self, ctxt, instance, tail_length): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.28'): - version = '3.28' - else: - version = '3.0' - instance = jsonutils.to_primitive(instance) + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_console_output', instance=instance, tail_length=tail_length) def get_console_pool_info(self, ctxt, console_type, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'get_console_pool_info', console_type=console_type) def get_console_topic(self, ctxt, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'get_console_topic') def get_diagnostics(self, ctxt, instance): - version = self._compat_ver('4.0', '3.18') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_diagnostics', instance=instance) def get_instance_diagnostics(self, ctxt, instance): + # TODO(danms): This needs to be fixed for objects instance_p = jsonutils.to_primitive(instance) kwargs = {'instance': instance_p} - version = self._compat_ver('4.0', '3.31') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs) def get_vnc_console(self, ctxt, instance, console_type): - version = self._compat_ver('4.0', '3.2') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_vnc_console', instance=instance, console_type=console_type) def get_spice_console(self, ctxt, instance, console_type): - version = self._compat_ver('4.0', '3.1') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_spice_console', instance=instance, console_type=console_type) def get_rdp_console(self, ctxt, instance, console_type): - version = self._compat_ver('4.0', '3.10') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_rdp_console', instance=instance, console_type=console_type) def get_serial_console(self, ctxt, instance, console_type): - version = self._compat_ver('4.0', '3.34') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_serial_console', instance=instance, console_type=console_type) def validate_console_port(self, ctxt, instance, port, console_type): - version = self._compat_ver('4.0', '3.3') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'validate_console_port', @@ -574,52 +514,46 @@ def host_maintenance_mode(self, ctxt, host_param, mode, host): :param mode: :param host: This is the host to send the message to. 
''' - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'host_maintenance_mode', host=host_param, mode=mode) def host_power_action(self, ctxt, action, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'host_power_action', action=action) def inject_network_info(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'inject_network_info', instance=instance) def live_migration(self, ctxt, instance, dest, block_migration, host, migrate_data=None): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.26'): - version = '3.26' - else: - version = '3.0' - instance = jsonutils.to_primitive(instance) + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'live_migration', instance=instance, dest=dest, block_migration=block_migration, migrate_data=migrate_data) def pause_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'pause_instance', instance=instance) def post_live_migration_at_destination(self, ctxt, instance, block_migration, host): - version = self._compat_ver('4.0', '3.14') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'post_live_migration_at_destination', instance=instance, block_migration=block_migration) def pre_live_migration(self, ctxt, instance, block_migration, disk, host, migrate_data=None): - version = self._compat_ver('4.0', '3.19') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'pre_live_migration', instance=instance, @@ -630,6 +564,7 @@ def prep_resize(self, ctxt, image, instance, instance_type, host, reservations=None, request_spec=None, filter_properties=None, node=None, clean_shutdown=True): + # TODO(danms): This needs to be fixed for objects! 
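Aside: the branches deleted throughout this file all follow the same shape, probing the pinned version with can_send_version() and trimming kwargs that older managers would reject. An illustrative reconstruction of the pre-4.0 idiom, modelled on the prep_resize() branch removed below; msg_args contents are placeholders:

    def prep_resize_compat(client, ctxt, host, **msg_args):
        if client.can_send_version('4.0'):
            version = '4.0'
        elif client.can_send_version('3.38'):
            version = '3.38'
        else:
            # A pre-3.38 manager does not know the new argument, so drop it
            # before casting.
            msg_args.pop('clean_shutdown', None)
            version = '3.0'
        cctxt = client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'prep_resize', **msg_args)
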
instance_type_p = jsonutils.to_primitive(instance_type) image_p = jsonutils.to_primitive(image) msg_args = {'instance': instance, @@ -640,19 +575,13 @@ def prep_resize(self, ctxt, image, instance, instance_type, host, 'filter_properties': filter_properties, 'node': node, 'clean_shutdown': clean_shutdown} - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.38'): - version = '3.38' - else: - del msg_args['clean_shutdown'] - version = '3.0' + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'prep_resize', **msg_args) def reboot_instance(self, ctxt, instance, block_device_info, reboot_type): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'reboot_instance', @@ -667,7 +596,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files, # NOTE(danms): kwargs is only here for cells compatibility, don't # actually send it to compute extra = {'preserve_ephemeral': preserve_ephemeral} - version = self._compat_ver('4.0', '3.21') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'rebuild_instance', @@ -679,7 +608,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files, **extra) def refresh_provider_fw_rules(self, ctxt, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_provider_fw_rules') @@ -693,85 +622,66 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host, parameter for the remote method. :param host: This is the host to send the message to. ''' - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, slave_info=slave_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): - version = self._compat_ver('4.0', '3.13') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'remove_fixed_ip_from_instance', instance=instance, address=address) def remove_volume_connection(self, ctxt, instance, volume_id, host): - if self.client.can_send_version('4.0'): - version = '4.0' - elif self.client.can_send_version('3.30'): - version = '3.30' - else: - version = '3.0' - instance = jsonutils.to_primitive(instance) + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'remove_volume_connection', instance=instance, volume_id=volume_id) def rescue_instance(self, ctxt, instance, rescue_password, rescue_image_ref=None, clean_shutdown=True): - msg_args = {'rescue_password': rescue_password} - if self.client.can_send_version('4.0'): - version = '4.0' - msg_args['clean_shutdown'] = clean_shutdown - msg_args['rescue_image_ref'] = rescue_image_ref - elif self.client.can_send_version('3.37'): - version = '3.37' - msg_args['clean_shutdown'] = clean_shutdown - msg_args['rescue_image_ref'] = rescue_image_ref - elif self.client.can_send_version('3.24'): - version = '3.24' - msg_args['rescue_image_ref'] = rescue_image_ref - else: - version = '3.9' - msg_args['instance'] = instance + version = '4.0' + msg_args = {'rescue_password': rescue_password, + 'clean_shutdown': clean_shutdown, + 'rescue_image_ref': rescue_image_ref, + 'instance': instance, + } cctxt = 
self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'rescue_instance', **msg_args) def reset_network(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'reset_network', instance=instance) def resize_instance(self, ctxt, instance, migration, image, instance_type, reservations=None, clean_shutdown=True): + # TODO(danms): This needs to be fixed for objects! instance_type_p = jsonutils.to_primitive(instance_type) msg_args = {'instance': instance, 'migration': migration, 'image': image, 'reservations': reservations, - 'instance_type': instance_type_p} - if self.client.can_send_version('4.0'): - version = '4.0' - msg_args['clean_shutdown'] = clean_shutdown - elif self.client.can_send_version('3.37'): - version = '3.37' - msg_args['clean_shutdown'] = clean_shutdown - else: - version = '3.0' + 'instance_type': instance_type_p, + 'clean_shutdown': clean_shutdown, + } + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resize_instance', **msg_args) def resume_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resume_instance', instance=instance) def revert_resize(self, ctxt, instance, migration, host, reservations=None): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'revert_resize', @@ -781,38 +691,28 @@ def revert_resize(self, ctxt, instance, migration, host, def rollback_live_migration_at_destination(self, ctxt, instance, host, destroy_disks=True, migrate_data=None): - if self.client.can_send_version('4.0'): - version = '4.0' - extra = {'destroy_disks': destroy_disks, - 'migrate_data': migrate_data, - } - elif self.client.can_send_version('3.32'): - version = '3.32' - extra = {'destroy_disks': destroy_disks, - 'migrate_data': migrate_data, - } - else: - version = '3.0' - extra = {} - self._warn_buggy_live_migrations(migrate_data) + version = '4.0' + extra = {'destroy_disks': destroy_disks, + 'migrate_data': migrate_data, + } cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'rollback_live_migration_at_destination', instance=instance, **extra) def set_admin_password(self, ctxt, instance, new_pass): - version = self._compat_ver('4.0', '3.8') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'set_admin_password', instance=instance, new_pass=new_pass) def set_host_enabled(self, ctxt, enabled, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled) def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'swap_volume', @@ -820,7 +720,7 @@ def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id): new_volume_id=new_volume_id) def get_host_uptime(self, ctxt, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 
'get_host_uptime') @@ -828,15 +728,8 @@ def reserve_block_device_name(self, ctxt, instance, device, volume_id, disk_bus=None, device_type=None): kw = {'instance': instance, 'device': device, 'volume_id': volume_id, 'disk_bus': disk_bus, - 'device_type': device_type, 'return_bdm_object': True} - if self.client.can_send_version('4.0'): - version = '4.0' - del kw['return_bdm_object'] - elif self.client.can_send_version('3.35'): - version = '3.35' - else: - del kw['return_bdm_object'] - version = '3.16' + 'device_type': device_type} + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) @@ -848,7 +741,7 @@ def reserve_block_device_name(self, ctxt, instance, device, volume_id, def backup_instance(self, ctxt, instance, image_id, backup_type, rotation): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'backup_instance', @@ -858,7 +751,7 @@ def backup_instance(self, ctxt, instance, image_id, backup_type, rotation=rotation) def snapshot_instance(self, ctxt, instance, image_id): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'snapshot_instance', @@ -866,34 +759,28 @@ def snapshot_instance(self, ctxt, instance, image_id): image_id=image_id) def start_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'start_instance', instance=instance) def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True): - msg_args = {'instance': instance} - if self.client.can_send_version('4.0'): - version = '4.0' - msg_args['clean_shutdown'] = clean_shutdown - elif self.client.can_send_version('3.37'): - version = '3.37' - msg_args['clean_shutdown'] = clean_shutdown - else: - version = '3.0' + msg_args = {'instance': instance, + 'clean_shutdown': clean_shutdown} + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) rpc_method = cctxt.cast if do_cast else cctxt.call return rpc_method(ctxt, 'stop_instance', **msg_args) def suspend_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'suspend_instance', instance=instance) def terminate_instance(self, ctxt, instance, bdms, reservations=None): - version = self._compat_ver('4.0', '3.22') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'terminate_instance', @@ -901,63 +788,50 @@ def terminate_instance(self, ctxt, instance, bdms, reservations=None): reservations=reservations) def unpause_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unpause_instance', instance=instance) def unrescue_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.11') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unrescue_instance', instance=instance) def soft_delete_instance(self, ctxt, instance, reservations=None): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = 
self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'soft_delete_instance', instance=instance, reservations=reservations) def restore_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.20') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'restore_instance', instance=instance) def shelve_instance(self, ctxt, instance, image_id=None, clean_shutdown=True): - msg_args = {'instance': instance, 'image_id': image_id} - if self.client.can_send_version('4.0'): - version = '4.0' - msg_args['clean_shutdown'] = clean_shutdown - elif self.client.can_send_version('3.37'): - version = '3.37' - msg_args['clean_shutdown'] = clean_shutdown - else: - version = '3.0' + msg_args = {'instance': instance, 'image_id': image_id, + 'clean_shutdown': clean_shutdown} + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_instance', **msg_args) def shelve_offload_instance(self, ctxt, instance, clean_shutdown=True): - msg_args = {'instance': instance} - if self.client.can_send_version('4.0'): - version = '4.0' - msg_args['clean_shutdown'] = clean_shutdown - elif self.client.can_send_version('3.37'): - version = '3.37' - msg_args['clean_shutdown'] = clean_shutdown - else: - version = '3.0' + msg_args = {'instance': instance, 'clean_shutdown': clean_shutdown} + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args) def unshelve_instance(self, ctxt, instance, host, image=None, filter_properties=None, node=None): - version = self._compat_ver('4.0', '3.15') + version = '4.0' msg_kwargs = { 'instance': instance, 'image': image, @@ -969,7 +843,7 @@ def unshelve_instance(self, ctxt, instance, host, image=None, def volume_snapshot_create(self, ctxt, instance, volume_id, create_info): - version = self._compat_ver('4.0', '3.6') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance, @@ -977,7 +851,7 @@ def volume_snapshot_create(self, ctxt, instance, volume_id, def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, delete_info): - version = self._compat_ver('4.0', '3.6') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance, @@ -987,7 +861,7 @@ def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, def external_instance_event(self, ctxt, instances, events): cctxt = self.client.prepare( server=_compute_host(None, instances[0]), - version=self._compat_ver('4.0', '3.23')) + version='4.0') cctxt.cast(ctxt, 'external_instance_event', instances=instances, events=events) @@ -997,43 +871,6 @@ def build_and_run_instance(self, ctxt, instance, host, image, request_spec, block_device_mapping=None, node=None, limits=None): version = '4.0' - if not self.client.can_send_version(version): - version = '3.40' - if not self.client.can_send_version(version): - version = '3.36' - if 'numa_topology' in limits and limits['numa_topology']: - topology_limits = limits['numa_topology'] - if node is not None: - cnode = objects.ComputeNode.get_by_host_and_nodename( - ctxt, host, node) - else: - cnode = ( - objects.ComputeNode. 
- get_first_node_by_host_for_old_compat( - ctxt, host)) - host_topology = objects.NUMATopology.obj_from_db_obj( - cnode.numa_topology) - limits['numa_topology'] = jsonutils.dumps( - topology_limits.to_dict_legacy(host_topology)) - if not self.client.can_send_version(version): - version = '3.33' - if 'instance_type' in filter_properties: - flavor = filter_properties['instance_type'] - flavor_p = objects_base.obj_to_primitive(flavor) - filter_properties = dict(filter_properties, - instance_type=flavor_p) - if not self.client.can_send_version(version): - version = '3.23' - if requested_networks is not None: - if utils.is_neutron(): - requested_networks = [(network_id, address, port_id) - for (network_id, address, port_id, _) in - requested_networks.as_tuples()] - else: - requested_networks = [(network_id, address) - for (network_id, address) in - requested_networks.as_tuples()] - cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'build_and_run_instance', instance=instance, image=image, request_spec=request_spec, @@ -1046,33 +883,34 @@ def build_and_run_instance(self, ctxt, instance, host, image, request_spec, limits=limits) def quiesce_instance(self, ctxt, instance): - version = self._compat_ver('4.0', '3.39') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'quiesce_instance', instance=instance) def unquiesce_instance(self, ctxt, instance, mapping=None): - version = self._compat_ver('4.0', '3.39') + version = '4.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unquiesce_instance', instance=instance, mapping=mapping) def refresh_security_group_rules(self, ctxt, security_group_id, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_security_group_rules', security_group_id=security_group_id) def refresh_security_group_members(self, ctxt, security_group_id, host): - version = self._compat_ver('4.0', '3.0') + version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_security_group_members', security_group_id=security_group_id) def refresh_instance_security_rules(self, ctxt, host, instance): - version = self._compat_ver('4.0', '3.0') + version = '4.0' + # TODO(danms): This needs to be fixed for objects! 
instance_p = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index 0a8fb227e2c..b10ce52a8cf 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -152,7 +152,7 @@ def live_migration(self, ctxt, block_migration, disk_over_commit, pass def prep_resize(self, ctxt, instance, instance_type, image, request_spec, - filter_properties, reservations): + filter_properties, reservations, clean_shutdown): pass @@ -246,9 +246,7 @@ def fake_show(meh, context, id, **kwargs): fake_image.stub_out_image_service(self.stubs) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) - fake_rpcapi = FakeSchedulerAPI() fake_taskapi = FakeComputeTaskAPI() - self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi) self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi) fake_network.set_stub_network_methods(self.stubs) @@ -410,8 +408,7 @@ def test_attach_volume_serial(self): with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata', return_value={})): instance = self._create_fake_instance_obj() - self.compute.attach_volume(self.context, self.volume_id, - '/dev/vdb', instance, bdm=fake_bdm) + self.compute.attach_volume(self.context, instance, bdm=fake_bdm) self.assertEqual(self.cinfo.get('serial'), self.volume_id) def test_attach_volume_raises(self): @@ -431,8 +428,7 @@ def fake_attach(*args, **kwargs): mock_attach.side_effect = fake_attach self.assertRaises( test.TestingException, self.compute.attach_volume, - self.context, 'fake', '/dev/vdb', - instance, bdm=fake_bdm) + self.context, instance, fake_bdm) self.assertTrue(mock_unreserve.called) self.assertTrue(mock_destroy.called) @@ -457,20 +453,6 @@ def test_detach_volume_api_raises(self): fake_bdm) self.assertTrue(mock_destroy.called) - def test_attach_volume_no_bdm(self): - fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) - instance = self._create_fake_instance_obj() - - with contextlib.nested( - mock.patch.object(objects.BlockDeviceMapping, - 'get_by_volume_id', return_value=fake_bdm), - mock.patch.object(self.compute, '_attach_volume') - ) as (mock_get_by_id, mock_attach): - self.compute.attach_volume(self.context, 'fake', '/dev/vdb', - instance, bdm=None) - mock_get_by_id.assert_called_once_with(self.context, 'fake') - self.assertTrue(mock_attach.called) - def test_await_block_device_created_too_slow(self): self.flags(block_device_allocate_retries=2) self.flags(block_device_allocate_retries_interval=0.1) @@ -747,23 +729,29 @@ def test_poll_volume_usage_with_data(self): def test_detach_volume_usage(self): # Test that detach volume update the volume usage cache table correctly instance = self._create_fake_instance_obj() - bdm = fake_block_device.FakeDbBlockDeviceDict( - {'id': 1, 'device_name': '/dev/vdb', - 'connection_info': '{}', 'instance_uuid': instance['uuid'], - 'source_type': 'volume', 'destination_type': 'volume', - 'volume_id': 1}) + bdm = objects.BlockDeviceMapping(context=self.context, + id=1, device_name='/dev/vdb', + connection_info='{}', + instance_uuid=instance['uuid'], + source_type='volume', + destination_type='volume', + no_device=False, + disk_bus='foo', + device_type='disk', + volume_id=1) host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb', 'connection_info': '{}', 'instance_uuid': instance['uuid'], 'volume_id': 1} - self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id') + 
self.mox.StubOutWithMock(objects.BlockDeviceMapping, + 'get_by_volume_id') self.mox.StubOutWithMock(self.compute.driver, 'block_stats') self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') # The following methods will be called - db.block_device_mapping_get_by_volume_id(self.context, 1, []).\ - AndReturn(bdm) + objects.BlockDeviceMapping.get_by_volume_id(self.context, 1).AndReturn( + bdm.obj_clone()) self.compute.driver.block_stats(instance, 'vdb').\ AndReturn([1L, 30L, 1L, 20L, None]) self.compute._get_host_volume_bdms(self.context, @@ -777,8 +765,6 @@ def test_detach_volume_usage(self): 'wr_req': 1, 'wr_bytes': 5, 'instance': instance}]) - db.block_device_mapping_get_by_volume_id(self.context, 1, []).\ - AndReturn(bdm) self.mox.ReplayAll() @@ -787,7 +773,7 @@ def fake_get_volume_encryption_metadata(self, context, volume_id): self.stubs.Set(cinder.API, 'get_volume_encryption_metadata', fake_get_volume_encryption_metadata) - self.compute.attach_volume(self.context, 1, '/dev/vdb', instance) + self.compute.attach_volume(self.context, instance, bdm) # Poll volume usage & then detach the volume. This will update the # total fields in the volume usage cache. @@ -1981,7 +1967,8 @@ def test_stop(self): inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) - self.compute.stop_instance(self.context, instance=inst_obj) + self.compute.stop_instance(self.context, instance=inst_obj, + clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_start(self): @@ -1996,7 +1983,8 @@ def test_start(self): inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) - self.compute.stop_instance(self.context, instance=inst_obj) + self.compute.stop_instance(self.context, instance=inst_obj, + clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) @@ -2014,7 +2002,8 @@ def test_stop_start_no_image(self): inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) - self.compute.stop_instance(self.context, instance=inst_obj) + self.compute.stop_instance(self.context, instance=inst_obj, + clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) @@ -2044,7 +2033,7 @@ def fake_unrescue(self, instance_ref, network_info): instance.task_state = task_states.RESCUING instance.save() - self.compute.rescue_instance(self.context, instance, None) + self.compute.rescue_instance(self.context, instance, None, None, True) self.assertTrue(called['rescued']) instance.task_state = task_states.UNRESCUING instance.save() @@ -2067,7 +2056,7 @@ def fake_rescue(self, context, instance_ref, network_info, image_meta, fake_notifier.NOTIFICATIONS = [] instance.task_state = task_states.RESCUING instance.save() - self.compute.rescue_instance(self.context, instance, None) + self.compute.rescue_instance(self.context, instance, None, True, True) expected_notifications = ['compute.instance.rescue.start', 'compute.instance.exists', @@ -2155,7 +2144,8 @@ def test_rescue_handle_err(self): exception.InstanceNotRescuable, expected_message): self.compute.rescue_instance( self.context, instance=inst_obj, - rescue_password='password') + rescue_password='password', rescue_image_ref=None, + clean_shutdown=True) self.assertEqual('some_random_state', inst_obj.vm_state) @@ -2176,7 +2166,8 
@@ def test_rescue_with_image_specified(self, mock_rescue, mock_get_image_metadata.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, - rescue_password="password", rescue_image_ref=image_ref) + rescue_password="password", rescue_image_ref=image_ref, + clean_shutdown=True) mock_get_image_metadata.assert_called_with(ctxt, self.compute.image_api, @@ -2204,7 +2195,9 @@ def test_rescue_with_base_image_when_image_not_specified(self, mock_get_image_metadata.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, - rescue_password="password") + rescue_password="password", + rescue_image_ref=None, + clean_shutdown=True) mock_get_image_metadata.assert_called_with(ctxt, self.compute.image_api, @@ -2259,7 +2252,8 @@ def fake_driver_power_off(self, instance, expected_attrs=extra) inst_obj.task_state = task_states.POWERING_OFF inst_obj.save() - self.compute.stop_instance(self.context, instance=inst_obj) + self.compute.stop_instance(self.context, instance=inst_obj, + clean_shutdown=True) self.assertTrue(called['power_off']) self.compute.terminate_instance(self.context, inst_obj, [], []) @@ -3865,30 +3859,6 @@ def _create_server_group(self, policies, instance_host): self.assertEqual('servergroup.create', msg.event_type) return instance_group - def _run_instance_reschedules_on_policy_violation(self, group, - hint): - instance = self._create_fake_instance_obj() - filter_properties = {'scheduler_hints': {'group': hint}} - self.assertRaises(exception.RescheduledException, - self.compute._build_instance, - self.context, {}, filter_properties, - [], None, None, True, None, instance, - None, False) - - def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self): - group = self._create_server_group(['anti-affinity'], self.compute.host) - self._run_instance_reschedules_on_policy_violation(group, group.name) - - def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self): - group = self._create_server_group(['anti-affinity'], self.compute.host) - self._run_instance_reschedules_on_policy_violation(group, group.uuid) - - def test_run_instance_reschedules_on_affinity_violation_by_uuid(self): - # Put the fake instance already in the group on a different host - hostname = self.compute.host + '.1' - group = self._create_server_group(['affinity'], hostname) - self._run_instance_reschedules_on_policy_violation(group, group.uuid) - def test_instance_set_to_error_on_uncaught_exception(self): # Test that instance is set to error state when exception is raised. 
instance = self._create_fake_instance_obj() @@ -4187,7 +4157,8 @@ def test_state_revert(self): ("reboot_instance", task_states.REBOOTING, {'block_device_info': [], 'reboot_type': 'SOFT'}), - ("stop_instance", task_states.POWERING_OFF), + ("stop_instance", task_states.POWERING_OFF, + {'clean_shutdown': True}), ("start_instance", task_states.POWERING_ON), ("terminate_instance", task_states.DELETING, {'bdms': [], @@ -4208,7 +4179,9 @@ def test_state_revert(self): ("set_admin_password", task_states.UPDATING_PASSWORD, {'new_pass': None}), ("rescue_instance", task_states.RESCUING, - {'rescue_password': None}), + {'rescue_password': None, + 'rescue_image_ref': None, + 'clean_shutdown': True}), ("unrescue_instance", task_states.UNRESCUING), ("revert_resize", task_states.RESIZE_REVERTING, {'migration': migration, @@ -4219,12 +4192,14 @@ def test_state_revert(self): 'reservations': [], 'request_spec': {}, 'filter_properties': {}, - 'node': None}), + 'node': None, + 'clean_shutdown': True}), ("resize_instance", task_states.RESIZE_PREP, {'migration': migration, 'image': {}, 'reservations': [], - 'instance_type': {}}), + 'instance_type': {}, + 'clean_shutdown': True}), ("pause_instance", task_states.PAUSING), ("unpause_instance", task_states.UNPAUSING), ("suspend_instance", task_states.SUSPENDING), @@ -4343,7 +4318,8 @@ def _test_finish_resize(self, power_on): self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) instance.task_state = task_states.RESIZE_MIGRATED instance.save() @@ -4527,8 +4503,7 @@ def fake_attach_volume(*args, **kwargs): fake_attach_volume) # attach volume to instance - self.compute.attach_volume(self.context, volume['id'], - '/dev/vdc', instance, bdm=bdm) + self.compute.attach_volume(self.context, instance, bdm) # assert volume attached correctly self.assertEqual(volume['device_name'], '/dev/vdc') @@ -4547,7 +4522,8 @@ def fake_attach_volume(*args, **kwargs): self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) # fake out detach for prep_resize (and later terminate) def fake_terminate_connection(self, context, volume, connector): @@ -4562,7 +4538,8 @@ def fake_terminate_connection(self, context, volume, connector): instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], - instance_type=jsonutils.to_primitive(instance_type)) + instance_type=jsonutils.to_primitive(instance_type), + clean_shutdown=True) # assert bdm is unchanged disk_info = db.block_device_mapping_get_all_by_instance( @@ -4642,7 +4619,7 @@ def fake(*args, **kwargs): instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, - node=None) + node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), @@ -4768,7 +4745,8 @@ def test_finish_resize_instance_notification(self): self.compute.prep_resize(self.context, instance=instance, instance_type=new_type, image={}, reservations=[], - request_spec={}, filter_properties={}, node=None) + request_spec={}, filter_properties={}, node=None, + clean_shutdown=True) self._stub_out_resize_network_methods() @@ -4777,7 +4755,7 @@ def 
test_finish_resize_instance_notification(self): instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, instance_type=new_type, - reservations=[]) + reservations=[], clean_shutdown=True) timeutils.set_time_override(cur_time) fake_notifier.NOTIFICATIONS = [] @@ -4827,7 +4805,8 @@ def test_resize_instance_notification(self): instance_type = flavors.get_default_flavor() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], - request_spec={}, filter_properties={}, node=None) + request_spec={}, filter_properties={}, node=None, + clean_shutdown=True) db.migration_get_by_instance_and_status(self.context.elevated(), instance.uuid, 'pre-migrating') @@ -4879,7 +4858,8 @@ def test_prep_resize_instance_migration_error_on_same_host(self): self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_prep_resize_instance_migration_error_on_none_host(self): @@ -4900,7 +4880,8 @@ def test_prep_resize_instance_migration_error_on_none_host(self): self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance_driver_error(self): @@ -4924,7 +4905,8 @@ def throw_up(*args, **kwargs): self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) instance.task_state = task_states.RESIZE_PREP instance.save() migration = objects.Migration.get_by_instance_and_status( @@ -4936,7 +4918,8 @@ def throw_up(*args, **kwargs): self.context, instance=instance, migration=migration, image={}, reservations=reservations, - instance_type=jsonutils.to_primitive(instance_type)) + instance_type=jsonutils.to_primitive(instance_type), + clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. instance.refresh() @@ -4962,7 +4945,8 @@ def throw_up(*args, **kwargs): self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) instance.task_state = task_states.RESIZE_PREP instance.save() @@ -4974,7 +4958,8 @@ def throw_up(*args, **kwargs): self.context, instance=instance, migration=migration, image={}, reservations=reservations, - instance_type=jsonutils.to_primitive(instance_type)) + instance_type=jsonutils.to_primitive(instance_type), + clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. 
instance.refresh() @@ -4993,7 +4978,8 @@ def _test_resize_instance(self, clean_shutdown=True): instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], - request_spec={}, filter_properties={}, node=None) + request_spec={}, filter_properties={}, node=None, + clean_shutdown=True) # verify 'old_vm_state' was set on system_metadata instance.refresh() @@ -5088,7 +5074,7 @@ def fake_confirm_migration_driver(*args, **kwargs): instance=instance, instance_type=new_instance_type_p, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), @@ -5103,7 +5089,8 @@ def fake_confirm_migration_driver(*args, **kwargs): migration=migration, image={}, reservations=[], - instance_type=new_instance_type_p) + instance_type=new_instance_type_p, + clean_shutdown=True) self.compute.finish_resize(self.context, migration=migration, reservations=[], disk_info={}, image={}, instance=instance) @@ -5192,7 +5179,8 @@ def fake_finish_revert_migration_driver(*args, **kwargs): instance=instance, instance_type=new_instance_type_p, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), @@ -5207,7 +5195,8 @@ def fake_finish_revert_migration_driver(*args, **kwargs): migration=migration, image={}, reservations=[], - instance_type=new_instance_type_p) + instance_type=new_instance_type_p, + clean_shutdown=True) self.compute.finish_resize(self.context, migration=migration, reservations=[], disk_info={}, image={}, instance=instance) @@ -5315,7 +5304,7 @@ def test_resize_same_source_fails(self): self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance_handles_migration_error(self): @@ -5339,7 +5328,7 @@ def raise_migration_failure(*args): instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, - node=None) + node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') @@ -5349,7 +5338,8 @@ def raise_migration_failure(*args): self.context, instance=instance, migration=migration, image={}, reservations=reservations, - instance_type=jsonutils.to_primitive(instance_type)) + instance_type=jsonutils.to_primitive(instance_type), + clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. 
instance.refresh() @@ -5791,7 +5781,9 @@ def test_rollback_live_migration_at_destination_correctly(self): # start test self.mox.ReplayAll() ret = self.compute.rollback_live_migration_at_destination(c, - instance=instance) + instance=instance, + destroy_disks=True, + migrate_data=None) self.assertIsNone(ret) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] @@ -9430,8 +9422,8 @@ def fake_terminate_connection(self, context, volume_id, connector): fake_terminate_connection) # Kill the instance and check that it was detached - bdms = db.block_device_mapping_get_all_by_instance(admin, - instance['uuid']) + bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( + admin, instance['uuid']) self.compute.terminate_instance(admin, instance, bdms, []) self.assertTrue(result["detached"]) @@ -10760,14 +10752,10 @@ def _reschedule(self, request_spec=None, filter_properties=None, instance = self._create_fake_instance_obj() - admin_password = None - injected_files = None - requested_networks = None - is_first_time = False - - scheduler_method = self.compute.scheduler_rpcapi.run_instance - method_args = (request_spec, admin_password, injected_files, - requested_networks, is_first_time, filter_properties) + scheduler_method = self.compute.compute_task_api.resize_instance + method_args = (instance, None, + dict(filter_properties=filter_properties), + {}, None) return self.compute._reschedule(self.context, request_spec, filter_properties, instance, scheduler_method, method_args, self.expected_task_state, exc_info=exc_info) @@ -10804,273 +10792,10 @@ def test_reschedule_success(self): self.assertEqual(exc_str, filter_properties['retry']['exc']) -class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase): - """Test re-scheduling logic for prep_resize requests.""" - - def setUp(self): - super(ComputeReschedulingResizeTestCase, self).setUp() - self.expected_task_state = task_states.RESIZE_PREP - - def _reschedule(self, request_spec=None, filter_properties=None, - exc_info=None): - if not filter_properties: - filter_properties = {} - - instance_uuid = str(uuid.uuid4()) - instance = self._create_fake_instance_obj( - params={'uuid': instance_uuid}) - instance_type = {} - reservations = None - - scheduler_method = self.compute.compute_task_api.resize_instance - scheduler_hint = dict(filter_properties=filter_properties) - method_args = (instance, None, scheduler_hint, instance_type, - reservations) - - return self.compute._reschedule(self.context, request_spec, - filter_properties, instance, scheduler_method, - method_args, self.expected_task_state, exc_info=exc_info) - - class InnerTestingException(Exception): pass -class ComputeRescheduleOrErrorTestCase(BaseTestCase): - """Test logic and exception handling around rescheduling or re-raising - original exceptions when builds fail. - """ - - def setUp(self): - super(ComputeRescheduleOrErrorTestCase, self).setUp() - self.instance = self._create_fake_instance_obj() - - def test_reschedule_or_error_called(self): - """Basic sanity check to make sure _reschedule_or_error is called - when a build fails. 
- """ - self.mox.StubOutWithMock(objects.BlockDeviceMappingList, - 'get_by_instance_uuid') - self.mox.StubOutWithMock(self.compute, '_spawn') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - - bdms = block_device_obj.block_device_make_list(self.context, []) - - objects.BlockDeviceMappingList.get_by_instance_uuid( - mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms) - self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), - [], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise( - test.TestingException("BuildError")) - self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), None, None, None, - False, None, {}, bdms, False).AndReturn(True) - - self.mox.ReplayAll() - self.compute._run_instance(self.context, None, {}, None, None, None, - False, None, self.instance, False) - - def test_shutdown_instance_fail(self): - """Test shutdown instance failing before re-scheduling logic can even - run. - """ - instance_uuid = self.instance['uuid'] - self.mox.StubOutWithMock(self.compute, '_shutdown_instance') - - try: - raise test.TestingException("Original") - except Exception: - exc_info = sys.exc_info() - - compute_utils.add_instance_fault_from_exc(self.context, - self.instance, exc_info[0], exc_info=exc_info) - self.compute._shutdown_instance(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), - mox.IgnoreArg()).AndRaise(InnerTestingException("Error")) - self.compute._log_original_error(exc_info, instance_uuid) - - self.mox.ReplayAll() - - # should raise the deallocation exception, not the original build - # error: - self.assertRaises(InnerTestingException, - self.compute._reschedule_or_error, self.context, - self.instance, exc_info, None, None, None, False, None, {}) - - def test_shutdown_instance_fail_instance_info_cache_not_found(self): - # Covers the case that _shutdown_instance fails with an - # InstanceInfoCacheNotFound exception when getting instance network - # information prior to calling driver.destroy. - elevated_context = self.context.elevated() - error = exception.InstanceInfoCacheNotFound( - instance_uuid=self.instance['uuid']) - with contextlib.nested( - mock.patch.object(self.context, 'elevated', - return_value=elevated_context), - mock.patch.object(self.compute.network_api, 'get_instance_nw_info', - side_effect=error), - mock.patch.object(self.compute, - '_get_instance_block_device_info'), - mock.patch.object(self.compute.driver, 'destroy'), - mock.patch.object(self.compute, '_try_deallocate_network') - ) as ( - elevated_mock, - _get_instance_nw_info_mock, - _get_instance_block_device_info_mock, - destroy_mock, - _try_deallocate_network_mock - ): - inst_obj = self.instance - self.compute._shutdown_instance(self.context, inst_obj, - bdms=[], notify=False) - # By asserting that _try_deallocate_network_mock was called - # exactly once, we know that _get_instance_nw_info raising - # InstanceInfoCacheNotFound did not make _shutdown_instance error - # out and driver.destroy was still called. - _try_deallocate_network_mock.assert_called_once_with( - elevated_context, inst_obj, None) - - def test_reschedule_fail(self): - # Test handling of exception from _reschedule. 
- try: - raise test.TestingException("Original") - except Exception: - exc_info = sys.exc_info() - - instance_uuid = self.instance['uuid'] - method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_shutdown_instance') - self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') - self.mox.StubOutWithMock(self.compute, '_reschedule') - - self.compute._shutdown_instance(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), - mox.IgnoreArg()) - self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid, - mox.IgnoreArg()) - self.compute._reschedule(self.context, None, self.instance, - {}, self.compute.scheduler_rpcapi.run_instance, - method_args, task_states.SCHEDULING, exc_info).AndRaise( - InnerTestingException("Inner")) - - self.mox.ReplayAll() - - self.assertFalse(self.compute._reschedule_or_error(self.context, - self.instance, exc_info, None, None, None, False, None, {})) - - def test_reschedule_false(self): - # Test not-rescheduling, but no nested exception. - instance_uuid = self.instance['uuid'] - method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_shutdown_instance') - self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') - self.mox.StubOutWithMock(self.compute, '_reschedule') - - try: - raise test.TestingException("Original") - except test.TestingException: - exc_info = sys.exc_info() - compute_utils.add_instance_fault_from_exc(self.context, - self.instance, exc_info[0], exc_info=exc_info) - - self.compute._shutdown_instance(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), - mox.IgnoreArg()) - self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid, - mox.IgnoreArg()) - self.compute._reschedule(self.context, None, {}, self.instance, - self.compute.scheduler_rpcapi.run_instance, method_args, - task_states.SCHEDULING, exc_info).AndReturn(False) - - self.mox.ReplayAll() - - # re-scheduling is False, the original build error should be - # raised here: - self.assertFalse(self.compute._reschedule_or_error(self.context, - self.instance, exc_info, None, None, None, False, None, {})) - - def test_reschedule_true(self): - # Test behavior when re-scheduling happens. 
- instance_uuid = self.instance['uuid'] - method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_shutdown_instance') - self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') - self.mox.StubOutWithMock(self.compute, '_reschedule') - - try: - raise test.TestingException("Original") - except Exception: - exc_info = sys.exc_info() - - compute_utils.add_instance_fault_from_exc(self.context, - self.instance, exc_info[0], exc_info=exc_info) - self.compute._shutdown_instance(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), - mox.IgnoreArg()) - self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid, - mox.IgnoreArg()) - self.compute._reschedule(self.context, None, {}, self.instance, - self.compute.scheduler_rpcapi.run_instance, - method_args, task_states.SCHEDULING, exc_info).AndReturn( - True) - self.compute._log_original_error(exc_info, instance_uuid) - - self.mox.ReplayAll() - - # re-scheduling is True, original error is logged, but nothing - # is raised: - self.compute._reschedule_or_error(self.context, self.instance, - exc_info, None, None, None, False, None, {}) - - def test_no_reschedule_on_delete_during_spawn(self): - # instance should not be rescheduled if instance is deleted - # during the build - self.mox.StubOutWithMock(self.compute, '_spawn') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - - exc = exception.UnexpectedDeletingTaskStateError( - expected=task_states.SPAWNING, actual=task_states.DELETING) - self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), set_access_ip=False).AndRaise(exc) - - self.mox.ReplayAll() - # test succeeds if mocked method '_reschedule_or_error' is not - # called. - self.compute._run_instance(self.context, None, {}, None, None, None, - False, None, self.instance, False) - - def test_no_reschedule_on_unexpected_task_state(self): - # instance shouldn't be rescheduled if unexpected task state arises. - # the exception should get reraised. 
- self.mox.StubOutWithMock(self.compute, '_spawn') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - - exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING, - actual=task_states.SCHEDULING) - self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), set_access_ip=False).AndRaise(exc) - - self.mox.ReplayAll() - self.assertRaises(exception.UnexpectedTaskStateError, - self.compute._run_instance, self.context, None, {}, None, None, - None, False, None, self.instance, False) - - def test_no_reschedule_on_block_device_fail(self): - self.mox.StubOutWithMock(self.compute, '_prep_block_device') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - - exc = exception.InvalidBDM() - - self.compute._prep_block_device(mox.IgnoreArg(), self.instance, - mox.IgnoreArg()).AndRaise(exc) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidBDM, self.compute._run_instance, - self.context, None, {}, None, None, None, False, - None, self.instance, False) - - class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase): """Test logic and exception handling around rescheduling prep resize requests @@ -11105,7 +10830,8 @@ def test_reschedule_resize_or_reraise_called(self): instance=inst_obj, instance_type=self.instance_type, reservations=[], request_spec={}, - filter_properties={}, node=None) + filter_properties={}, node=None, + clean_shutdown=True) def test_reschedule_fails_with_exception(self): """Original exception should be raised if the _reschedule method @@ -11242,8 +10968,8 @@ def _test_rebuild(mock_setup_instance_network_on_host, orig_image_ref = None image_ref = None injected_files = None - bdms = db.block_device_mapping_get_all_by_instance(self.context, - self.inst.uuid) + bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( + self.context, self.inst.uuid) self.compute.rebuild_instance( mock_context, self.inst, orig_image_ref, image_ref, injected_files, 'newpass', {}, bdms, recreate=True, @@ -11483,31 +11209,6 @@ def test_injected_invalid(self): block_device_mapping=[], injected_files=injected_files) - def test_reschedule(self): - # test that rescheduling is done with original encoded files - expected = [ - ('/a/b/c', base64.b64encode('foobarbaz')), - ('/d/e/f', base64.b64encode('seespotrun')), - ] - - def _roe(context, instance, exc_info, requested_networks, - admin_password, injected_files, is_first_time, request_spec, - filter_properties, bdms=None, legacy_bdm_in_spec=False): - self.assertEqual(expected, injected_files) - return True - - def spawn_explode(context, instance, image_meta, injected_files, - admin_password, nw_info, block_device_info): - # force reschedule logic to execute - raise test.TestingException("spawn error") - - self.stubs.Set(self.compute.driver, 'spawn', spawn_explode) - self.stubs.Set(self.compute, '_reschedule_or_error', _roe) - - self.compute.build_and_run_instance(self.context, self.instance, - {}, {}, {}, - block_device_mapping=[]) - class CheckConfigDriveTestCase(test.NoDBTestCase): # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index faae1bf06a8..136f3340288 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -222,103 +222,6 @@ def test_allocate_network_succeeds_after_retries( self.assertFalse(mock_save.called) self.assertEqual('True', 
instance.system_metadata['network_allocated']) - def test_allocate_network_maintains_context(self): - # override tracker with a version that doesn't need the database: - class FakeResourceTracker(object): - @staticmethod - def instance_claim(context, instance, limits): - return mock.MagicMock() - - self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - self.mox.StubOutWithMock(self.compute, '_allocate_network') - self.mox.StubOutWithMock(objects.BlockDeviceMappingList, - 'get_by_instance_uuid') - - instance = fake_instance.fake_instance_obj(self.context) - - objects.BlockDeviceMappingList.get_by_instance_uuid( - mox.IgnoreArg(), instance.uuid).AndReturn([]) - - node = 'fake_node' - self.compute._get_resource_tracker(node).AndReturn( - FakeResourceTracker()) - - self.admin_context = False - - def fake_allocate(context, *args, **kwargs): - if context.is_admin: - self.admin_context = True - raise test.TestingException() - - # NOTE(vish): The nice mox parameter matchers here don't work well - # because they raise an exception that gets wrapped by - # the retry exception handling, so use a side effect - # to keep track of whether allocate was called with admin - # context. - self.compute._allocate_network(mox.IgnoreArg(), instance, - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).WithSideEffects(fake_allocate) - - self.compute._reschedule_or_error(mox.IgnoreArg(), instance, - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()) - - self.mox.ReplayAll() - - self.assertRaises(test.TestingException, - self.compute._build_instance, - self.context, {}, {}, - None, None, None, True, - node, instance, - {}, False) - self.assertFalse(self.admin_context, - "_allocate_network called with admin context") - - def test_reschedule_maintains_context(self): - # override tracker with a version that causes a reschedule - class FakeResourceTracker(object): - def instance_claim(self, context, instance, limits): - raise test.TestingException() - - self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') - self.mox.StubOutWithMock(self.compute, '_reschedule_or_error') - self.mox.StubOutWithMock(objects.BlockDeviceMappingList, - 'get_by_instance_uuid') - instance = fake_instance.fake_instance_obj(self.context) - - objects.BlockDeviceMappingList.get_by_instance_uuid( - mox.IgnoreArg(), instance.uuid).AndReturn([]) - - node = 'fake_node' - self.compute._get_resource_tracker(node).AndReturn( - FakeResourceTracker()) - - self.admin_context = False - - def fake_retry_or_error(context, *args, **kwargs): - if context.is_admin: - self.admin_context = True - - # NOTE(vish): we could use a mos parameter matcher here but it leads - # to a very cryptic error message, so use the same method - # as the allocate_network_maintains_context test. 
- self.compute._reschedule_or_error(mox.IgnoreArg(), instance, - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).WithSideEffects(fake_retry_or_error) - - self.mox.ReplayAll() - - self.assertRaises(test.TestingException, - self.compute._build_instance, self.context, {}, {}, - None, None, None, True, node, instance, {}, False) - self.assertFalse(self.admin_context, - "_reschedule_or_error called with admin context") - def test_allocate_network_fails(self): self.flags(network_allocate_retries=0) @@ -1073,7 +976,7 @@ def test_init_instance_retries_power_off(self): instance.host = self.compute.host with mock.patch.object(self.compute, 'stop_instance'): self.compute._init_instance(self.context, instance) - call = mock.call(self.context, instance) + call = mock.call(self.context, instance, True) self.compute.stop_instance.assert_has_calls([call]) def test_init_instance_retries_power_on(self): @@ -1112,7 +1015,7 @@ def test_init_instance_retries_power_off_silent_exception(self): with mock.patch.object(self.compute, 'stop_instance', return_value=Exception): init_return = self.compute._init_instance(self.context, instance) - call = mock.call(self.context, instance) + call = mock.call(self.context, instance, True) self.compute.stop_instance.assert_has_calls([call]) self.assertIsNone(init_return) @@ -2262,41 +2165,6 @@ def test_cleanup_volumes_exception_raise(self): calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms] self.assertEqual(calls, volume_delete.call_args_list) - def test_start_building(self): - instance = fake_instance.fake_instance_obj(self.context) - with mock.patch.object(self.compute, '_instance_update') as update: - self.compute._start_building(self.context, instance) - update.assert_called_once_with( - self.context, instance.uuid, vm_state=vm_states.BUILDING, - task_state=None, expected_task_state=(task_states.SCHEDULING, - None)) - - def _test_prebuild_instance_build_abort_exception(self, exc): - instance = fake_instance.fake_instance_obj(self.context) - with contextlib.nested( - mock.patch.object(self.compute, '_check_instance_exists'), - mock.patch.object(self.compute, '_start_building', - side_effect=exc) - ) as ( - check, start - ): - # run the code - self.assertRaises(exception.BuildAbortException, - self.compute._prebuild_instance, - self.context, instance) - # assert the calls - check.assert_called_once_with(self.context, instance) - start.assert_called_once_with(self.context, instance) - - def test_prebuild_instance_instance_not_found(self): - self._test_prebuild_instance_build_abort_exception( - exception.InstanceNotFound(instance_id='fake')) - - def test_prebuild_instance_unexpected_deleting_task_state_err(self): - self._test_prebuild_instance_build_abort_exception( - exception.UnexpectedDeletingTaskStateError(expected='foo', - actual='bar')) - def test_stop_instance_task_state_none_power_state_shutdown(self): # Tests that stop_instance doesn't puke when the instance power_state # is shutdown and the task_state is None. 
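The stop_instance changes above and in the next hunk all add the now-mandatory clean_shutdown argument, passed either positionally (context, instance, True) or as clean_shutdown=True. Below is a minimal sketch of the manager-side signature those calls imply; the class name, default value, and empty body are illustrative assumptions rather than text copied from manager.py.

    class ComputeManagerSketch(object):
        # Hypothetical stand-in for the compute manager; only the
        # signature matters for the updated tests.
        def stop_instance(self, context, instance, clean_shutdown=True):
            # Assumed default; the real method powers off the instance
            # using the requested shutdown behavior.
            pass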
@@ -2311,7 +2179,7 @@ def test_stop_instance_task_state_none_power_state_shutdown(self): @mock.patch.object(instance, 'save') def do_test(save_mock, power_off_mock, notify_mock, get_state_mock): # run the code - self.compute.stop_instance(self.context, instance) + self.compute.stop_instance(self.context, instance, True) # assert the calls self.assertEqual(2, get_state_mock.call_count) notify_mock.assert_has_calls([ @@ -2646,8 +2514,10 @@ def test_build_and_run_instance_with_icehouse_requested_network( filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, - requested_networks=[('fake_network_id', '10.0.0.1', - 'fake_port_id')], + requested_networks=[objects.NetworkRequest( + network_id='fake_network_id', + address='10.0.0.1', + port_id='fake_port_id')], security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) @@ -3674,8 +3544,8 @@ def test_resize_instance_failure(self): self.assertRaises( exception.ResizeError, self.compute.resize_instance, context=self.context, instance=self.instance, image=self.image, - reservations=[], migration=self.migration, instance_type='type' - ) + reservations=[], migration=self.migration, + instance_type='type', clean_shutdown=True) self.assertEqual("error", self.migration.status) self.assertEqual([mock.call(), mock.call()], migration_save.mock_calls) diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py index 6f17a450e9d..308ada0ad71 100644 --- a/nova/tests/unit/compute/test_rpcapi.py +++ b/nova/tests/unit/compute/test_rpcapi.py @@ -25,9 +25,6 @@ from nova.compute import rpcapi as compute_rpcapi from nova import context from nova.objects import block_device as objects_block_dev -from nova.objects import compute_node as objects_compute_node -from nova.objects import network_request as objects_network_request -from nova.objects import numa as objects_numa from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance @@ -151,350 +148,154 @@ def test_add_aggregate_host(self): aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('add_aggregate_host', 'cast', - aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}, version='3.0') - def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', instance=self.fake_instance_obj, network_id='id', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('add_fixed_ip_to_instance', 'cast', - instance=self.fake_instance_obj, network_id='id', - version='3.12') - def test_attach_interface(self): self._test_compute_api('attach_interface', 'call', instance=self.fake_instance_obj, network_id='id', port_id='id2', version='4.0', requested_ip='192.168.1.50') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('attach_interface', 'call', - instance=self.fake_instance_obj, network_id='id', - port_id='id2', version='3.17', requested_ip='192.168.1.50') - def test_attach_volume(self): self._test_compute_api('attach_volume', 'cast', instance=self.fake_instance_obj, volume_id='id', mountpoint='mp', bdm=self.fake_volume_bdm, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('attach_volume', 'cast', - instance=self.fake_instance_obj, volume_id='id', - mountpoint='mp', 
bdm=self.fake_volume_bdm, version='3.16') - def test_change_instance_metadata(self): self._test_compute_api('change_instance_metadata', 'cast', instance=self.fake_instance_obj, diff={}, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('change_instance_metadata', 'cast', - instance=self.fake_instance_obj, diff={}, version='3.7') - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_check_can_live_migrate_destination(self, mock_warn): - self._test_compute_api('check_can_live_migrate_destination', 'call', - instance=self.fake_instance_obj, - destination='dest', block_migration=True, - disk_over_commit=True, version='4.0') - - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('check_can_live_migrate_destination', 'call', - instance=self.fake_instance_obj, - destination='dest', block_migration=True, - disk_over_commit=True, version='3.32') - self.assertFalse(mock_warn.called) - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_check_can_live_migrate_destination_old_warning(self, mock_warn): - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('check_can_live_migrate_destination', 'call', - instance=self.fake_instance_obj, - destination='dest', block_migration=True, - disk_over_commit=True, version='3.0') - mock_warn.assert_called_once_with() - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_check_can_live_migrate_source(self, mock_warn): - self._test_compute_api('check_can_live_migrate_source', 'call', - instance=self.fake_instance_obj, - dest_check_data={"test": "data"}, version='4.0') - - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('check_can_live_migrate_source', 'call', - instance=self.fake_instance_obj, - dest_check_data={"test": "data"}, version='3.32') - self.assertFalse(mock_warn.called) - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_check_can_live_migrate_source_old_warning(self, mock_warn): - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('check_can_live_migrate_source', 'call', - instance=self.fake_instance_obj, - dest_check_data={"test": "data"}, version='3.0') - mock_warn.assert_called_once_with() - def test_check_instance_shared_storage(self): self._test_compute_api('check_instance_shared_storage', 'call', instance=self.fake_instance_obj, data='foo', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('check_instance_shared_storage', 'call', - instance=self.fake_instance_obj, data='foo', - version='3.29') - def test_confirm_resize_cast(self): self._test_compute_api('confirm_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('confirm_resize', 'cast', - instance=self.fake_instance_obj, migration={'id': 'foo'}, - host='host', reservations=list('fake_res')) - def test_confirm_resize_call(self): self._test_compute_api('confirm_resize', 'call', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('confirm_resize', 'call', - instance=self.fake_instance_obj, migration={'id': 'foo'}, - host='host', reservations=list('fake_res')) - def test_detach_interface(self): 
self._test_compute_api('detach_interface', 'cast', version='4.0', instance=self.fake_instance_obj, port_id='fake_id') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('detach_interface', 'cast', - version='3.17', instance=self.fake_instance_obj, - port_id='fake_id') - def test_detach_volume(self): self._test_compute_api('detach_volume', 'cast', instance=self.fake_instance_obj, volume_id='id', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('detach_volume', 'cast', - instance=self.fake_instance_obj, volume_id='id', - version='3.25') - def test_finish_resize(self): self._test_compute_api('finish_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, image='image', disk_info='disk_info', host='host', reservations=list('fake_res')) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('finish_resize', 'cast', - instance=self.fake_instance_obj, migration={'id': 'foo'}, - image='image', disk_info='disk_info', host='host', - reservations=list('fake_res')) - def test_finish_revert_resize(self): self._test_compute_api('finish_revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('finish_revert_resize', 'cast', - instance=self.fake_instance_obj, migration={'id': 'fake_id'}, - host='host', reservations=list('fake_res')) - def test_get_console_output(self): self._test_compute_api('get_console_output', 'call', instance=self.fake_instance_obj, tail_length='tl', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_console_output', 'call', - instance=self.fake_instance_obj, tail_length='tl', - version='3.28') - def test_get_console_pool_info(self): self._test_compute_api('get_console_pool_info', 'call', console_type='type', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_console_pool_info', 'call', - console_type='type', host='host') - def test_get_console_topic(self): self._test_compute_api('get_console_topic', 'call', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_console_topic', 'call', host='host') - def test_get_diagnostics(self): self._test_compute_api('get_diagnostics', 'call', instance=self.fake_instance_obj, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_diagnostics', 'call', - instance=self.fake_instance_obj, version='3.18') - def test_get_instance_diagnostics(self): self._test_compute_api('get_instance_diagnostics', 'call', assert_dict=True, instance=self.fake_instance_obj, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_instance_diagnostics', 'call', - assert_dict=True, instance=self.fake_instance_obj, - version='3.31') - def test_get_vnc_console(self): self._test_compute_api('get_vnc_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_vnc_console', 'call', - instance=self.fake_instance_obj, console_type='type', - version='3.2') - def test_get_spice_console(self): self._test_compute_api('get_spice_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - 
self._test_compute_api('get_spice_console', 'call', - instance=self.fake_instance_obj, console_type='type', - version='3.1') - def test_get_rdp_console(self): self._test_compute_api('get_rdp_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_rdp_console', 'call', - instance=self.fake_instance_obj, console_type='type', - version='3.10') - def test_get_serial_console(self): self._test_compute_api('get_serial_console', 'call', instance=self.fake_instance_obj, console_type='serial', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_serial_console', 'call', - instance=self.fake_instance_obj, console_type='serial', - version='3.34') - def test_validate_console_port(self): self._test_compute_api('validate_console_port', 'call', instance=self.fake_instance_obj, port="5900", console_type="novnc", version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('validate_console_port', 'call', - instance=self.fake_instance_obj, port="5900", - console_type="novnc", version='3.3') - def test_host_maintenance_mode(self): self._test_compute_api('host_maintenance_mode', 'call', host_param='param', mode='mode', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('host_maintenance_mode', 'call', - host_param='param', mode='mode', host='host') - def test_host_power_action(self): self._test_compute_api('host_power_action', 'call', action='action', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('host_power_action', 'call', action='action', - host='host') - def test_inject_network_info(self): self._test_compute_api('inject_network_info', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('inject_network_info', 'cast', - instance=self.fake_instance_obj) - def test_live_migration(self): self._test_compute_api('live_migration', 'cast', instance=self.fake_instance_obj, dest='dest', block_migration='blockity_block', host='tsoh', migrate_data={}, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('live_migration', 'cast', - instance=self.fake_instance_obj, dest='dest', - block_migration='blockity_block', host='tsoh', - migrate_data={}, version='3.26') - def test_post_live_migration_at_destination(self): self._test_compute_api('post_live_migration_at_destination', 'cast', instance=self.fake_instance_obj, block_migration='block_migration', host='host', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('post_live_migration_at_destination', 'cast', - instance=self.fake_instance_obj, - block_migration='block_migration', host='host', version='3.14') - def test_pause_instance(self): self._test_compute_api('pause_instance', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('pause_instance', 'cast', - instance=self.fake_instance_obj) - def test_soft_delete_instance(self): self._test_compute_api('soft_delete_instance', 'cast', instance=self.fake_instance_obj, reservations=['uuid1', 'uuid2']) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('soft_delete_instance', 'cast', - instance=self.fake_instance_obj, - reservations=['uuid1', 'uuid2']) - def test_swap_volume(self): self._test_compute_api('swap_volume', 
'cast', instance=self.fake_instance_obj, old_volume_id='oldid', new_volume_id='newid') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('swap_volume', 'cast', - instance=self.fake_instance_obj, old_volume_id='oldid', - new_volume_id='newid') - def test_restore_instance(self): self._test_compute_api('restore_instance', 'cast', instance=self.fake_instance_obj, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('restore_instance', 'cast', - instance=self.fake_instance_obj, version='3.20') - def test_pre_live_migration(self): self._test_compute_api('pre_live_migration', 'call', instance=self.fake_instance_obj, block_migration='block_migration', disk='disk', host='host', migrate_data=None, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('pre_live_migration', 'call', - instance=self.fake_instance_obj, - block_migration='block_migration', disk='disk', host='host', - migrate_data=None, version='3.19') - def test_prep_resize(self): self._test_compute_api('prep_resize', 'cast', instance=self.fake_instance_obj, instance_type='fake_type', @@ -504,31 +305,6 @@ def test_prep_resize(self): filter_properties={'fakeprop': 'fakeval'}, node='node', clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('prep_resize', 'cast', - instance=self.fake_instance_obj, instance_type='fake_type', - image='fake_image', host='host', - reservations=list('fake_res'), - request_spec='fake_spec', - filter_properties={'fakeprop': 'fakeval'}, - node='node', version='3.0') - self.flags(compute='3.38', group='upgrade_levels') - self._test_compute_api('prep_resize', 'cast', - instance=self.fake_instance_obj, instance_type='fake_type', - image='fake_image', host='host', - reservations=list('fake_res'), - request_spec='fake_spec', - filter_properties={'fakeprop': 'fakeval'}, - node='node', clean_shutdown=True, version='3.38') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('prep_resize', 'cast', - instance=self.fake_instance_obj, instance_type='fake_type', - image='fake_image', host='host', - reservations=list('fake_res'), - request_spec='fake_spec', - filter_properties={'fakeprop': 'fakeval'}, - node='node', clean_shutdown=True, version='3.38') - def test_reboot_instance(self): self.maxDiff = None self._test_compute_api('reboot_instance', 'cast', @@ -536,12 +312,6 @@ def test_reboot_instance(self): block_device_info={}, reboot_type='type') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('reboot_instance', 'cast', - instance=self.fake_instance_obj, - block_device_info={}, - reboot_type='type') - def test_rebuild_instance(self): self._test_compute_api('rebuild_instance', 'cast', new_pass='None', injected_files='None', image_ref='None', orig_image_ref='None', @@ -549,13 +319,6 @@ def test_rebuild_instance(self): orig_sys_metadata=None, recreate=True, on_shared_storage=True, preserve_ephemeral=True, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('rebuild_instance', 'cast', new_pass='None', - injected_files='None', image_ref='None', orig_image_ref='None', - bdms=[], instance=self.fake_instance_obj, host='new_host', - orig_sys_metadata=None, recreate=True, on_shared_storage=True, - preserve_ephemeral=True, version='3.21') - def test_reserve_block_device_name(self): self._test_compute_api('reserve_block_device_name', 'call', instance=self.fake_instance_obj, 
device='device', @@ -563,320 +326,125 @@ def test_reserve_block_device_name(self): version='4.0', _return_value=objects_block_dev.BlockDeviceMapping()) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('reserve_block_device_name', 'call', - instance=self.fake_instance_obj, device='device', - volume_id='id', disk_bus='ide', device_type='cdrom', - version='3.35', return_bdm_object=True) - def refresh_provider_fw_rules(self): self._test_compute_api('refresh_provider_fw_rules', 'cast', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('refresh_provider_fw_rules', 'cast', - host='host') - def test_refresh_security_group_rules(self): self._test_compute_api('refresh_security_group_rules', 'cast', security_group_id='id', host='host', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('refresh_security_group_rules', 'cast', - security_group_id='id', host='host', version='3.0') - def test_refresh_security_group_members(self): self._test_compute_api('refresh_security_group_members', 'cast', security_group_id='id', host='host', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('refresh_security_group_members', 'cast', - security_group_id='id', host='host', version='3.0') - def test_refresh_instance_security_rules(self): self._test_compute_api('refresh_instance_security_rules', 'cast', host='fake_host', instance=self.fake_instance_obj, version='4.0', assert_dict=True) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('refresh_instance_security_rules', 'cast', - host='fake_host', instance=self.fake_instance_obj, - version='3.0', assert_dict=True) - def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('remove_aggregate_host', 'cast', - aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) - def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', instance=self.fake_instance_obj, address='addr', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('remove_fixed_ip_from_instance', 'cast', - instance=self.fake_instance_obj, address='addr', - version='3.13') - def test_remove_volume_connection(self): self._test_compute_api('remove_volume_connection', 'call', instance=self.fake_instance, volume_id='id', host='host', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('remove_volume_connection', 'call', - instance=self.fake_instance, volume_id='id', host='host', - version='3.30') - def test_rescue_instance(self): self._test_compute_api('rescue_instance', 'cast', instance=self.fake_instance_obj, rescue_password='pw', rescue_image_ref='fake_image_ref', clean_shutdown=True, version='4.0') - self.flags(compute='3.9', group='upgrade_levels') - self._test_compute_api('rescue_instance', 'cast', - instance=self.fake_instance_obj, rescue_password='pw', - version='3.9') - self.flags(compute='3.24', group='upgrade_levels') - self._test_compute_api('rescue_instance', 'cast', - instance=self.fake_instance_obj, rescue_password='pw', - rescue_image_ref='fake_image_ref', version='3.24') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('rescue_instance', 'cast', - 
instance=self.fake_instance_obj, rescue_password='pw', - rescue_image_ref='fake_image_ref', - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('rescue_instance', 'cast', - instance=self.fake_instance_obj, rescue_password='pw', - rescue_image_ref='fake_image_ref', - clean_shutdown=True, version='3.37') def test_reset_network(self): self._test_compute_api('reset_network', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('reset_network', 'cast', - instance=self.fake_instance_obj) - def test_resize_instance(self): self._test_compute_api('resize_instance', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, image='image', instance_type={'id': 1}, reservations=list('fake_res'), clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('resize_instance', 'cast', - instance=self.fake_instance_obj, migration={'id': 'fake_id'}, - image='image', instance_type={'id': 1}, - reservations=list('fake_res'), version='3.0') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('resize_instance', 'cast', - instance=self.fake_instance_obj, migration={'id': 'fake_id'}, - image='image', instance_type={'id': 1}, - reservations=list('fake_res'), - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('resize_instance', 'cast', - instance=self.fake_instance_obj, migration={'id': 'fake_id'}, - image='image', instance_type={'id': 1}, - reservations=list('fake_res'), - clean_shutdown=True, version='3.37') def test_resume_instance(self): self._test_compute_api('resume_instance', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('resume_instance', 'cast', - instance=self.fake_instance_obj) - def test_revert_resize(self): self._test_compute_api('revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('revert_resize', 'cast', - instance=self.fake_instance_obj, migration={'id': 'fake_id'}, - host='host', reservations=list('fake_res')) - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_rollback_live_migration_at_destination(self, mock_warn): - self._test_compute_api('rollback_live_migration_at_destination', - 'cast', instance=self.fake_instance_obj, host='host', - destroy_disks=True, migrate_data=None, version='4.0') - - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('rollback_live_migration_at_destination', - 'cast', instance=self.fake_instance_obj, host='host', - destroy_disks=True, migrate_data=None, version='3.32') - self.assertFalse(mock_warn.called) - - @mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations') - def test_rollback_live_migration_at_destination_old_warning(self, - mock_warn): - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('rollback_live_migration_at_destination', - 'cast', instance=self.fake_instance_obj, host='host', - version='3.0') - mock_warn.assert_called_once_with(None) - def test_set_admin_password(self): self._test_compute_api('set_admin_password', 'call', instance=self.fake_instance_obj, new_pass='pw', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - 
self._test_compute_api('set_admin_password', 'call', - instance=self.fake_instance_obj, new_pass='pw', - version='3.8') - def test_set_host_enabled(self): self._test_compute_api('set_host_enabled', 'call', enabled='enabled', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('set_host_enabled', 'call', - enabled='enabled', host='host') - def test_get_host_uptime(self): self._test_compute_api('get_host_uptime', 'call', host='host') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('get_host_uptime', 'call', host='host') - def test_backup_instance(self): self._test_compute_api('backup_instance', 'cast', instance=self.fake_instance_obj, image_id='id', backup_type='type', rotation='rotation') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('backup_instance', 'cast', - instance=self.fake_instance_obj, image_id='id', - backup_type='type', rotation='rotation') - def test_snapshot_instance(self): self._test_compute_api('snapshot_instance', 'cast', instance=self.fake_instance_obj, image_id='id') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('snapshot_instance', 'cast', - instance=self.fake_instance_obj, image_id='id') - def test_start_instance(self): self._test_compute_api('start_instance', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('start_instance', 'cast', - instance=self.fake_instance_obj) - def test_stop_instance_cast(self): self._test_compute_api('stop_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('stop_instance', 'cast', - instance=self.fake_instance_obj, version='3.0') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('stop_instance', 'cast', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('stop_instance', 'cast', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') def test_stop_instance_call(self): self._test_compute_api('stop_instance', 'call', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('stop_instance', 'call', - instance=self.fake_instance_obj, version='3.0') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('stop_instance', 'call', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('stop_instance', 'call', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') def test_suspend_instance(self): self._test_compute_api('suspend_instance', 'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('suspend_instance', 'cast', - instance=self.fake_instance_obj) - def test_terminate_instance(self): self._test_compute_api('terminate_instance', 'cast', instance=self.fake_instance_obj, bdms=[], reservations=['uuid1', 'uuid2'], version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('terminate_instance', 'cast', - instance=self.fake_instance_obj, bdms=[], - reservations=['uuid1', 'uuid2'], version='3.22') - def test_unpause_instance(self): self._test_compute_api('unpause_instance', 
'cast', instance=self.fake_instance_obj) - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('unpause_instance', 'cast', - instance=self.fake_instance_obj) - def test_unrescue_instance(self): self._test_compute_api('unrescue_instance', 'cast', instance=self.fake_instance_obj, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('unrescue_instance', 'cast', - instance=self.fake_instance_obj, version='3.11') - def test_shelve_instance(self): self._test_compute_api('shelve_instance', 'cast', instance=self.fake_instance_obj, image_id='image_id', clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('shelve_instance', 'cast', - instance=self.fake_instance_obj, image_id='image_id', - version='3.0') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('shelve_instance', 'cast', - instance=self.fake_instance_obj, image_id='image_id', - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('shelve_instance', 'cast', - instance=self.fake_instance_obj, image_id='image_id', - clean_shutdown=True, version='3.37') def test_shelve_offload_instance(self): self._test_compute_api('shelve_offload_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') - self.flags(compute='3.0', group='upgrade_levels') - self._test_compute_api('shelve_offload_instance', 'cast', - instance=self.fake_instance_obj, - version='3.0') - self.flags(compute='3.37', group='upgrade_levels') - self._test_compute_api('shelve_offload_instance', 'cast', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('shelve_offload_instance', 'cast', - instance=self.fake_instance_obj, - clean_shutdown=True, version='3.37') def test_unshelve_instance(self): self._test_compute_api('unshelve_instance', 'cast', @@ -884,44 +452,22 @@ def test_unshelve_instance(self): filter_properties={'fakeprop': 'fakeval'}, node='node', version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('unshelve_instance', 'cast', - instance=self.fake_instance_obj, host='host', image='image', - filter_properties={'fakeprop': 'fakeval'}, node='node', - version='3.15') - def test_volume_snapshot_create(self): self._test_compute_api('volume_snapshot_create', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', create_info={}, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('volume_snapshot_create', 'cast', - instance=self.fake_instance_obj, volume_id='fake_id', - create_info={}, version='3.6') - def test_volume_snapshot_delete(self): self._test_compute_api('volume_snapshot_delete', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', snapshot_id='fake_id2', delete_info={}, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('volume_snapshot_delete', 'cast', - instance=self.fake_instance_obj, volume_id='fake_id', - snapshot_id='fake_id2', delete_info={}, version='3.6') - def test_external_instance_event(self): self._test_compute_api('external_instance_event', 'cast', instances=[self.fake_instance_obj], events=['event'], version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('external_instance_event', 'cast', - instances=[self.fake_instance_obj], - events=['event'], - 
version='3.23') - def test_build_and_run_instance(self): self._test_compute_api('build_and_run_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', @@ -931,119 +477,10 @@ def test_build_and_run_instance(self): block_device_mapping=None, node='node', limits=[], version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('build_and_run_instance', 'cast', - instance=self.fake_instance_obj, host='host', image='image', - request_spec={'request': 'spec'}, filter_properties=[], - admin_password='passwd', injected_files=None, - requested_networks=['network1'], security_groups=None, - block_device_mapping=None, node='node', limits=[], - version='3.40') - - @mock.patch('nova.utils.is_neutron', return_value=True) - def test_build_and_run_instance_icehouse_compat(self, is_neutron): - self.flags(compute='icehouse', group='upgrade_levels') - self._test_compute_api('build_and_run_instance', 'cast', - instance=self.fake_instance_obj, host='host', image='image', - request_spec={'request': 'spec'}, filter_properties=[], - admin_password='passwd', injected_files=None, - requested_networks= objects_network_request.NetworkRequestList( - objects=[objects_network_request.NetworkRequest( - network_id="fake_network_id", address="10.0.0.1", - port_id="fake_port_id")]), - security_groups=None, - block_device_mapping=None, node='node', limits={}, - version='3.23') - - @mock.patch('nova.utils.is_neutron', return_value=False) - def test_build_and_run_instance_icehouse_compat_nova_net(self, is_neutron): - self.flags(compute='icehouse', group='upgrade_levels') - self._test_compute_api('build_and_run_instance', 'cast', - instance=self.fake_instance_obj, host='host', image='image', - request_spec={'request': 'spec'}, filter_properties=[], - admin_password='passwd', injected_files=None, - requested_networks= objects_network_request.NetworkRequestList( - objects=[objects_network_request.NetworkRequest( - network_id='fake_network_id', address='10.0.0.1')]), - security_groups=None, - block_device_mapping=None, node='node', limits={}, - version='3.23', nova_network=True) - def test_quiesce_instance(self): self._test_compute_api('quiesce_instance', 'call', instance=self.fake_instance_obj, version='4.0') - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('quiesce_instance', 'call', - instance=self.fake_instance_obj, version='3.39') - def test_unquiesce_instance(self): self._test_compute_api('unquiesce_instance', 'cast', instance=self.fake_instance_obj, mapping=None, version='4.0') - - self.flags(compute='3.40', group='upgrade_levels') - self._test_compute_api('unquiesce_instance', 'cast', - instance=self.fake_instance_obj, mapping=None, version='3.39') - - @mock.patch('nova.utils.is_neutron', return_value=True) - def test_build_and_run_instance_juno_compat(self, is_neutron): - self.flags(compute='juno', group='upgrade_levels') - self._test_compute_api('build_and_run_instance', 'cast', - instance=self.fake_instance_obj, host='host', image='image', - request_spec={'request': 'spec'}, filter_properties=[], - admin_password='passwd', injected_files=None, - requested_networks= objects_network_request.NetworkRequestList( - objects=[objects_network_request.NetworkRequest( - network_id="fake_network_id", address="10.0.0.1", - port_id="fake_port_id")]), - security_groups=None, - block_device_mapping=None, node='node', limits={}, - version='3.33') - - @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') - @mock.patch('nova.utils.is_neutron', 
return_value=True) - def test_build_and_run_instance_limits_juno_compat( - self, is_neutron, get_by_host_and_nodename): - host_topology = objects_numa.NUMATopology(cells=[ - objects_numa.NUMACell( - id=0, cpuset=set([1, 2]), memory=512, - cpu_usage=2, memory_usage=256, - pinned_cpus=set([1])), - objects_numa.NUMACell( - id=1, cpuset=set([3, 4]), memory=512, - cpu_usage=1, memory_usage=128, - pinned_cpus=set([])) - ]) - limits = objects_numa.NUMATopologyLimits( - cpu_allocation_ratio=16, - ram_allocation_ratio=2) - cnode = objects_compute_node.ComputeNode( - numa_topology=jsonutils.dumps( - host_topology.obj_to_primitive())) - - get_by_host_and_nodename.return_value = cnode - legacy_limits = jsonutils.dumps( - limits.to_dict_legacy(host_topology)) - - self.flags(compute='juno', group='upgrade_levels') - netreqs = objects_network_request.NetworkRequestList(objects=[ - objects_network_request.NetworkRequest( - network_id="fake_network_id", - address="10.0.0.1", - port_id="fake_port_id")]) - - self._test_compute_api('build_and_run_instance', 'cast', - instance=self.fake_instance_obj, - host='host', - image='image', - request_spec={'request': 'spec'}, - filter_properties=[], - admin_password='passwd', - injected_files=None, - requested_networks=netreqs, - security_groups=None, - block_device_mapping=None, - node='node', - limits={'numa_topology': limits}, - legacy_limits={'numa_topology': legacy_limits}, - version='3.33') diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py index 5a861564112..8f2d9917285 100644 --- a/nova/tests/unit/virt/xenapi/test_xenapi.py +++ b/nova/tests/unit/virt/xenapi/test_xenapi.py @@ -3000,7 +3000,8 @@ def setUp(self): values = {'name': 'test_aggr', 'metadata': {'availability_zone': 'test_zone', pool_states.POOL_FLAG: 'XenAPI'}} - self.aggr = db.aggregate_create(self.context, values) + self.aggr = objects.Aggregate(context=self.context, id=1, + **values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', 'master_compute': 'host', 'availability_zone': 'fake_zone', @@ -3244,18 +3245,17 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate) metadata = {pool_states.POOL_FLAG: "XenAPI", pool_states.KEY: pool_states.ACTIVE} - db.aggregate_metadata_add(self.context, self.aggr['id'], metadata) - db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host') + self.aggr.metadata = metadata + self.aggr.hosts = ['fake_host'] self.assertRaises(exception.AggregateError, self.compute.add_aggregate_host, self.context, host="fake_host", - aggregate=jsonutils.to_primitive(self.aggr), + aggregate=self.aggr, slave_info=None) - excepted = db.aggregate_get(self.context, self.aggr['id']) - self.assertEqual(excepted['metadetails'][pool_states.KEY], + self.assertEqual(self.aggr.metadata[pool_states.KEY], pool_states.ERROR) - self.assertEqual(excepted['hosts'], []) + self.assertEqual(self.aggr.hosts, ['fake_host']) class MockComputeAPI(object):
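
For reference, the surviving 4.0-only tests above pass the new-world
argument forms directly: requested_networks is a NetworkRequestList of
NetworkRequest objects rather than the legacy (network_id, address,
port_id) tuples, and stop_instance()/resize_instance() always carry an
explicit clean_shutdown. A minimal sketch of those forms, mirroring the
hunks in this patch (the surrounding test setup is assumed rather than
taken from the diff):

    # Illustrative only; mirrors the argument forms exercised above.
    from nova import objects

    objects.register_all()  # normally done by the test harness (assumption)

    # New-world requested_networks: NetworkRequest objects, not tuples.
    requested_networks = objects.NetworkRequestList(objects=[
        objects.NetworkRequest(network_id='fake_network_id',
                               address='10.0.0.1',
                               port_id='fake_port_id')])

    # clean_shutdown is always present on the 4.0 interface, e.g. the
    # manager-side tests now call:
    #     self.compute.stop_instance(self.context, instance, True)
    #     self.compute.resize_instance(..., clean_shutdown=True)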
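
Similarly, the xenapi aggregate tests build an objects.Aggregate up
front instead of round-tripping through the DB API. A hedged sketch of
that construction, based on the values used in the hunk above (the id
and context here are placeholders, not taken from the diff):

    # Illustrative only; mirrors the xenapi test change above.
    from nova import objects
    from nova.virt.xenapi import pool_states

    values = {'name': 'test_aggr',
              'metadata': {'availability_zone': 'test_zone',
                           pool_states.POOL_FLAG: 'XenAPI'}}
    aggr = objects.Aggregate(context=None, id=1, **values)

    # State changes are then applied to the object directly:
    aggr.metadata = {pool_states.POOL_FLAG: 'XenAPI',
                     pool_states.KEY: pool_states.ACTIVE}
    aggr.hosts = ['fake_host']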