From 1c967593fbb0ab8b9dc8b0b509e388591d32f537 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Wed, 16 Nov 2016 11:09:58 -0500 Subject: [PATCH] rt: use a single ResourceTracker object instance This patch removes the ResourceTracker.nodename attribute, switches the compute manager to only create a single ResourceTracker object instance which itself now has an in-memory dict of ComputeNode objects that are managed by the nova-compute daemon. This isolates the code that manages ComputeNode objects and resources in just the resource tracker, which will make it possible for the scheduler report client to manage Ironic nodes and custom resource classes properly. The scheduler report client contains a cache of ResourceProvider, Inventory, and Allocation records. We definitely did not want to have multiple ResourceTracker object instances, each with their own cache of ResourceProvider objects. Having a single ResourceTracker and single scheduler report client is both more efficient and a cleaner interface into the compute manager. Co-Authored-By: Chris Dent Change-Id: I6827137f35c0cb4f9fc4c6f753d9a035326ed01b blueprint: custom-resource-classes --- nova/compute/claims.py | 15 +- nova/compute/manager.py | 56 ++---- nova/compute/resource_tracker.py | 182 ++++++++++-------- nova/tests/functional/test_compute_mgr.py | 6 +- nova/tests/unit/compute/test_claims.py | 16 +- nova/tests/unit/compute/test_compute.py | 110 +++++------ nova/tests/unit/compute/test_compute_mgr.py | 106 +++++----- .../tests/unit/compute/test_multiple_nodes.py | 168 ---------------- .../unit/compute/test_resource_tracker.py | 171 +++++++++------- nova/tests/unit/compute/test_shelve.py | 6 +- 10 files changed, 342 insertions(+), 494 deletions(-) delete mode 100644 nova/tests/unit/compute/test_multiple_nodes.py diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 7da82ded374..e69047a2988 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -74,11 +74,12 @@ class Claim(NopClaim): correct decisions with respect to host selection. """ - def __init__(self, context, instance, tracker, resources, pci_requests, - overhead=None, limits=None): + def __init__(self, context, instance, nodename, tracker, resources, + pci_requests, overhead=None, limits=None): super(Claim, self).__init__() # Stash a copy of the instance at the current point of time self.instance = instance.obj_clone() + self.nodename = nodename self._numa_topology_loaded = False self.tracker = tracker self._pci_requests = pci_requests @@ -122,7 +123,7 @@ def abort(self): """ LOG.debug("Aborting claim: %s", self, instance=self.instance) self.tracker.abort_instance_claim(self.context, self.instance, - self.instance.node) + self.nodename) def _claim_test(self, resources, limits=None): """Test if this claim can be satisfied given available resources and @@ -260,14 +261,14 @@ class MoveClaim(Claim): Move can be either a migrate/resize, live-migrate or an evacuate operation. 
""" - def __init__(self, context, instance, instance_type, image_meta, tracker, - resources, pci_requests, overhead=None, limits=None): + def __init__(self, context, instance, nodename, instance_type, image_meta, + tracker, resources, pci_requests, overhead=None, limits=None): self.context = context self.instance_type = instance_type if isinstance(image_meta, dict): image_meta = objects.ImageMeta.from_dict(image_meta) self.image_meta = image_meta - super(MoveClaim, self).__init__(context, instance, tracker, + super(MoveClaim, self).__init__(context, instance, nodename, tracker, resources, pci_requests, overhead=overhead, limits=limits) self.migration = None @@ -298,6 +299,6 @@ def abort(self): LOG.debug("Aborting claim: %s", self, instance=self.instance) self.tracker.drop_move_claim( self.context, - self.instance, self.instance.node, + self.instance, self.nodename, instance_type=self.instance_type) self.instance.drop_migration_context() diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3f83db64b3f..84b024863d7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -514,7 +514,7 @@ def __init__(self, compute_driver=None, *args, **kwargs): self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() self.scheduler_client = scheduler_client.SchedulerClient() - self._resource_tracker_dict = {} + self._resource_tracker = None self.instance_events = InstanceEvents() self._sync_power_pool = eventlet.GreenPool( size=CONF.sync_power_state_pool_size) @@ -547,26 +547,17 @@ def reset(self): compute_rpcapi.LAST_VERSION = None self.compute_rpcapi = compute_rpcapi.ComputeAPI() - def _get_resource_tracker(self, nodename): - rt = self._resource_tracker_dict.get(nodename) - if not rt: - if not self.driver.node_is_available(nodename): - raise exception.NovaException( - _("%s is not a valid node managed by this " - "compute host.") % nodename) - - rt = resource_tracker.ResourceTracker(self.host, - self.driver, - nodename) - self._resource_tracker_dict[nodename] = rt - return rt + def _get_resource_tracker(self): + if not self._resource_tracker: + rt = resource_tracker.ResourceTracker(self.host, self.driver) + self._resource_tracker = rt + return self._resource_tracker def _update_resource_tracker(self, context, instance): """Let the resource tracker know that an instance has changed state.""" - if (instance.host == self.host and - self.driver.node_is_available(instance.node)): - rt = self._get_resource_tracker(instance.node) + if instance.host == self.host: + rt = self._get_resource_tracker() rt.update_usage(context, instance, instance.node) def _instance_update(self, context, instance, **kwargs): @@ -1899,7 +1890,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files, self._check_device_tagging(requested_networks, block_device_mapping) try: - rt = self._get_resource_tracker(node) + rt = self._get_resource_tracker() with rt.instance_claim(context, instance, node, limits): # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where @@ -2713,7 +2704,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref, LOG.info(_LI("Rebuilding instance"), instance=instance) if scheduled_node is not None: - rt = self._get_resource_tracker(scheduled_node) + rt = self._get_resource_tracker() rebuild_claim = rt.rebuild_claim else: rebuild_claim = claims.NopClaim @@ -3505,7 +3496,7 @@ def _confirm_resize(self, context, instance, quotas, with 
migration.obj_as_admin(): migration.save() - rt = self._get_resource_tracker(migration.source_node) + rt = self._get_resource_tracker() rt.drop_move_claim(context, instance, migration.source_node, old_instance_type, prefix='old_') instance.drop_migration_context() @@ -3597,7 +3588,7 @@ def revert_resize(self, context, instance, migration, reservations): instance.revert_migration_context() instance.save() - rt = self._get_resource_tracker(instance.node) + rt = self._get_resource_tracker() rt.drop_move_claim(context, instance, instance.node) self.compute_rpcapi.finish_revert_resize(context, instance, @@ -3715,7 +3706,7 @@ def _prep_resize(self, context, image, instance, instance_type, instance.save() limits = filter_properties.get('limits', {}) - rt = self._get_resource_tracker(node) + rt = self._get_resource_tracker() with rt.resize_claim(context, instance, instance_type, node, image_meta=image, limits=limits) as claim: LOG.info(_LI('Migrating'), instance=instance) @@ -4436,7 +4427,7 @@ def _unshelve_instance(self, context, instance, image, filter_properties, LOG.debug('No node specified, defaulting to %s', node, instance=instance) - rt = self._get_resource_tracker(node) + rt = self._get_resource_tracker() limits = filter_properties.get('limits', {}) shelved_image_ref = instance.image_ref @@ -6532,7 +6523,7 @@ def _reclaim_queued_deletes(self, context): def update_available_resource_for_node(self, context, nodename): - rt = self._get_resource_tracker(nodename) + rt = self._get_resource_tracker() try: rt.update_available_resource(context, nodename) except exception.ComputeHostNotFound: @@ -6544,18 +6535,17 @@ def update_available_resource_for_node(self, context, nodename): # that this will resolve itself on the next run. LOG.info(_LI("Compute node '%s' not found in " "update_available_resource."), nodename) - self._resource_tracker_dict.pop(nodename, None) + # TODO(jaypipes): Yes, this is inefficient to throw away all of the + # compute nodes to force a rebuild, but this is only temporary + # until Ironic baremetal node resource providers are tracked + # properly in the report client and this is a tiny edge case + # anyway. + self._resource_tracker = None return except Exception: LOG.exception(_LE("Error updating resources for node " "%(node)s."), {'node': nodename}) - # NOTE(comstud): Replace the RT cache before looping through - # compute nodes to delete below, as we can end up doing greenthread - # switches there. Best to have everyone using the newest cache - # ASAP. - self._resource_tracker_dict[nodename] = rt - @periodic_task.periodic_task(spacing=CONF.update_resources_interval) def update_available_resource(self, context): """See driver.get_available_resource() @@ -6572,10 +6562,6 @@ def update_available_resource(self, context): for nodename in nodenames: self.update_available_resource_for_node(context, nodename) - self._resource_tracker_dict = { - k: v for k, v in self._resource_tracker_dict.items() - if k in nodenames} - # Delete orphan compute node not reported by driver but still in db for cn in compute_nodes_in_db: if cn.hypervisor_hostname not in nodenames: diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index b580e947f9d..ba36a5b2c57 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -18,6 +18,7 @@ scheduler with useful information about availability through the ComputeNode model. 
""" +import collections import copy from oslo_log import log as logging @@ -81,18 +82,18 @@ class ResourceTracker(object): are built and destroyed. """ - def __init__(self, host, driver, nodename): + def __init__(self, host, driver): self.host = host self.driver = driver self.pci_tracker = None - self.nodename = nodename - self.compute_node = None + # Dict of objects.ComputeNode objects, keyed by nodename + self.compute_nodes = {} self.stats = stats.Stats() self.tracked_instances = {} self.tracked_migrations = {} monitor_handler = monitors.MonitorHandler(self) self.monitors = monitor_handler.monitors - self.old_resources = objects.ComputeNode() + self.old_resources = collections.defaultdict(objects.ComputeNode) self.scheduler_client = scheduler_client.SchedulerClient() self.ram_allocation_ratio = CONF.ram_allocation_ratio self.cpu_allocation_ratio = CONF.cpu_allocation_ratio @@ -116,9 +117,15 @@ def instance_claim(self, context, instance, nodename, limits=None): be used to revert the resource usage if an error occurs during the instance build. """ - if self.disabled: - # compute_driver doesn't support resource tracking, just - # set the 'host' and node fields and continue the build: + if self.disabled(nodename): + # instance_claim() was called before update_available_resource() + # (which ensures that a compute node exists for nodename). We + # shouldn't get here but in case we do, just set the instance's + # host and nodename attribute (probably incorrect) and return a + # NoopClaim. + # TODO(jaypipes): Remove all the disabled junk from the resource + # tracker. Servicegroup API-level active-checking belongs in the + # nova-compute manager. self._set_instance_host_and_node(instance, nodename) return claims.NopClaim() @@ -142,10 +149,10 @@ def instance_claim(self, context, instance, nodename, limits=None): "GB", {'flavor': instance.flavor.root_gb, 'overhead': overhead.get('disk_gb', 0)}) - cn = self.compute_node + cn = self.compute_nodes[nodename] pci_requests = objects.InstancePCIRequests.get_by_instance_uuid( context, instance.uuid) - claim = claims.Claim(context, instance, self, cn, + claim = claims.Claim(context, instance, nodename, self, cn, pci_requests, overhead=overhead, limits=limits) # self._set_instance_host_and_node() will save instance to the DB @@ -218,7 +225,7 @@ def _move_claim(self, context, instance, new_instance_type, nodename, new_instance_type, nodename, move_type) - if self.disabled: + if self.disabled(nodename): # compute_driver doesn't support resource tracking, just # generate the migration record and continue the resize: return claims.NopClaim(migration=migration) @@ -232,7 +239,7 @@ def _move_claim(self, context, instance, new_instance_type, nodename, "GB", {'flavor': instance.flavor.root_gb, 'overhead': overhead.get('disk_gb', 0)}) - cn = self.compute_node + cn = self.compute_nodes[nodename] # TODO(moshele): we are recreating the pci requests even if # there was no change on resize. 
This will cause allocating @@ -249,8 +256,8 @@ def _move_claim(self, context, instance, new_instance_type, nodename, for request in instance.pci_requests.requests: if request.alias_name is None: new_pci_requests.requests.append(request) - claim = claims.MoveClaim(context, instance, new_instance_type, - image_meta, self, cn, + claim = claims.MoveClaim(context, instance, nodename, + new_instance_type, image_meta, self, cn, new_pci_requests, overhead=overhead, limits=limits) @@ -355,7 +362,7 @@ def abort_instance_claim(self, context, instance, nodename): instance.clear_numa_topology() self._unset_instance_host_and_node(instance) - self._update(context.elevated(), self.compute_node) + self._update(context.elevated(), self.compute_nodes[nodename]) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) def drop_move_claim(self, context, instance, nodename, @@ -384,14 +391,14 @@ def drop_move_claim(self, context, instance, nodename, self._update_usage(usage, nodename, sign=-1) ctxt = context.elevated() - self._update(ctxt, self.compute_node) + self._update(ctxt, self.compute_nodes[nodename]) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) def update_usage(self, context, instance, nodename): """Update the resource usage and stats after a change in an instance """ - if self.disabled: + if self.disabled(nodename): return uuid = instance['uuid'] @@ -400,11 +407,11 @@ def update_usage(self, context, instance, nodename): # claim first: if uuid in self.tracked_instances: self._update_usage_from_instance(context, instance, nodename) - self._update(context.elevated(), self.compute_node) + self._update(context.elevated(), self.compute_nodes[nodename]) - @property - def disabled(self): - return self.compute_node is None + def disabled(self, nodename): + return (nodename not in self.compute_nodes or + not self.driver.node_is_available(nodename)) def _init_compute_node(self, context, resources): """Initialize the compute node if it does not already exist. @@ -424,38 +431,41 @@ def _init_compute_node(self, context, resources): # if there is already a compute node just use resources # to initialize - if self.compute_node: - self._copy_resources(self.compute_node, resources) - self._setup_pci_tracker(context, resources) - self.scheduler_client.update_resource_stats(self.compute_node) + if nodename in self.compute_nodes: + cn = self.compute_nodes[nodename] + self._copy_resources(cn, resources) + self._setup_pci_tracker(context, cn, resources) + self.scheduler_client.update_resource_stats(cn) return # now try to get the compute node record from the # database. If we get one we use resources to initialize - self.compute_node = self._get_compute_node(context, nodename) - if self.compute_node: - self._copy_resources(self.compute_node, resources) - self._setup_pci_tracker(context, resources) - self.scheduler_client.update_resource_stats(self.compute_node) + cn = self._get_compute_node(context, nodename) + if cn: + self.compute_nodes[nodename] = cn + self._copy_resources(cn, resources) + self._setup_pci_tracker(context, cn, resources) + self.scheduler_client.update_resource_stats(cn) return # there was no local copy and none in the database # so we need to create a new compute node. This needs # to be initialized with resource values. 
- self.compute_node = objects.ComputeNode(context) - self.compute_node.host = self.host - self._copy_resources(self.compute_node, resources) - self.compute_node.create() + cn = objects.ComputeNode(context) + cn.host = self.host + self._copy_resources(cn, resources) + self.compute_nodes[nodename] = cn + cn.create() LOG.info(_LI('Compute_service record created for ' '%(host)s:%(node)s'), {'host': self.host, 'node': nodename}) - self._setup_pci_tracker(context, resources) - self.scheduler_client.update_resource_stats(self.compute_node) + self._setup_pci_tracker(context, cn, resources) + self.scheduler_client.update_resource_stats(cn) - def _setup_pci_tracker(self, context, resources): + def _setup_pci_tracker(self, context, compute_node, resources): if not self.pci_tracker: - n_id = self.compute_node.id if self.compute_node else None + n_id = compute_node.id self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id) if 'pci_passthrough_devices' in resources: dev_json = resources.pop('pci_passthrough_devices') @@ -463,7 +473,7 @@ def _setup_pci_tracker(self, context, resources): dev_json) dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() - self.compute_node.pci_device_pools = dev_pools_obj + compute_node.pci_device_pools = dev_pools_obj def _copy_resources(self, compute_node, resources): """Copy resource values to supplied compute_node.""" @@ -564,12 +574,13 @@ def _update_available_resource(self, context, resources): # if it does not already exist. self._init_compute_node(context, resources) + nodename = resources['hypervisor_hostname'] + # if we could not init the compute node the tracker will be # disabled and we should quit now - if self.disabled: + if self.disabled(nodename): return - nodename = resources['hypervisor_hostname'] # Grab all instances assigned to this node: instances = objects.InstanceList.get_by_host_and_node( context, self.host, nodename, @@ -592,23 +603,25 @@ def _update_available_resource(self, context, resources): orphans = self._find_orphaned_instances() self._update_usage_from_orphans(orphans, nodename) + cn = self.compute_nodes[nodename] + # NOTE(yjiang5): Because pci device tracker status is not cleared in # this periodic task, and also because the resource tracker is not # notified when instances are deleted, we need remove all usages # from deleted instances. self.pci_tracker.clean_usage(instances, migrations, orphans) dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() - self.compute_node.pci_device_pools = dev_pools_obj + cn.pci_device_pools = dev_pools_obj self._report_final_resource_view(nodename) metrics = self._get_host_metrics(context, nodename) # TODO(pmurray): metrics should not be a json string in ComputeNode, # but it is. This should be changed in ComputeNode - self.compute_node.metrics = jsonutils.dumps(metrics) + cn.metrics = jsonutils.dumps(metrics) # update the compute_node - self._update(context, self.compute_node) + self._update(context, cn) LOG.debug('Compute_service record updated for %(host)s:%(node)s', {'host': self.host, 'node': nodename}) @@ -664,10 +677,11 @@ def _report_final_resource_view(self, nodename): including instance calculations and in-progress resource claims. These values will be exposed via the compute node table to the scheduler. 
""" - vcpus = self.compute_node.vcpus + cn = self.compute_nodes[nodename] + vcpus = cn.vcpus if vcpus: tcpu = vcpus - ucpu = self.compute_node.vcpus_used + ucpu = cn.vcpus_used LOG.debug("Total usable vcpus: %(tcpu)s, " "total allocated vcpus: %(ucpu)s", {'tcpu': vcpus, @@ -675,8 +689,8 @@ def _report_final_resource_view(self, nodename): else: tcpu = 0 ucpu = 0 - pci_stats = (list(self.compute_node.pci_device_pools) if - self.compute_node.pci_device_pools else []) + pci_stats = (list(cn.pci_device_pools) if + cn.pci_device_pools else []) LOG.info(_LI("Final resource view: " "name=%(node)s " "phys_ram=%(phys_ram)sMB " @@ -687,18 +701,20 @@ def _report_final_resource_view(self, nodename): "used_vcpus=%(used_vcpus)s " "pci_stats=%(pci_stats)s"), {'node': nodename, - 'phys_ram': self.compute_node.memory_mb, - 'used_ram': self.compute_node.memory_mb_used, - 'phys_disk': self.compute_node.local_gb, - 'used_disk': self.compute_node.local_gb_used, + 'phys_ram': cn.memory_mb, + 'used_ram': cn.memory_mb_used, + 'phys_disk': cn.local_gb, + 'used_disk': cn.local_gb_used, 'total_vcpus': tcpu, 'used_vcpus': ucpu, 'pci_stats': pci_stats}) def _resource_change(self, compute_node): """Check to see if any resources have changed.""" - if not obj_base.obj_equal_prims(compute_node, self.old_resources): - self.old_resources = copy.deepcopy(compute_node) + nodename = compute_node.hypervisor_hostname + old_compute = self.old_resources[nodename] + if not obj_base.obj_equal_prims(compute_node, old_compute): + self.old_resources[nodename] = copy.deepcopy(compute_node) return True return False @@ -719,24 +735,23 @@ def _update_usage(self, usage, nodename, sign=1): mem_usage += overhead['memory_mb'] disk_usage += overhead.get('disk_gb', 0) - self.compute_node.memory_mb_used += sign * mem_usage - self.compute_node.local_gb_used += sign * disk_usage - self.compute_node.local_gb_used += sign * usage.get('ephemeral_gb', 0) - self.compute_node.vcpus_used += sign * usage.get('vcpus', 0) + cn = self.compute_nodes[nodename] + cn.memory_mb_used += sign * mem_usage + cn.local_gb_used += sign * disk_usage + cn.local_gb_used += sign * usage.get('ephemeral_gb', 0) + cn.vcpus_used += sign * usage.get('vcpus', 0) # free ram and disk may be negative, depending on policy: - self.compute_node.free_ram_mb = (self.compute_node.memory_mb - - self.compute_node.memory_mb_used) - self.compute_node.free_disk_gb = (self.compute_node.local_gb - - self.compute_node.local_gb_used) + cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used + cn.free_disk_gb = cn.local_gb - cn.local_gb_used - self.compute_node.running_vms = self.stats.num_instances + cn.running_vms = self.stats.num_instances # Calculate the numa usage free = sign == -1 updated_numa_topology = hardware.get_host_numa_usage_from_instance( - self.compute_node, usage, free) - self.compute_node.numa_topology = updated_numa_topology + cn, usage, free) + cn.numa_topology = updated_numa_topology def _get_migration_context_resource(self, resource, instance, prefix='new_'): @@ -811,6 +826,7 @@ def _update_usage_from_migration(self, context, instance, migration, 'numa_topology', instance, prefix='old_') if itype: + cn = self.compute_nodes[nodename] usage = self._get_usage_dict( itype, numa_topology=numa_topology) if self.pci_tracker and sign: @@ -819,10 +835,10 @@ def _update_usage_from_migration(self, context, instance, migration, self._update_usage(usage, nodename) if self.pci_tracker: obj = self.pci_tracker.stats.to_device_pools_obj() - self.compute_node.pci_device_pools = obj + 
cn.pci_device_pools = obj else: obj = objects.PciDevicePoolList() - self.compute_node.pci_device_pools = obj + cn.pci_device_pools = obj self.tracked_migrations[uuid] = migration def _update_usage_from_migrations(self, context, migrations, nodename): @@ -890,8 +906,9 @@ def _update_usage_from_instance(self, context, instance, nodename, self.tracked_instances.pop(uuid) sign = -1 + cn = self.compute_nodes[nodename] self.stats.update_stats_for_instance(instance, is_removed_instance) - self.compute_node.stats = copy.deepcopy(self.stats) + cn.stats = copy.deepcopy(self.stats) # if it's a new or deleted instance: if is_new_instance or is_removed_instance: @@ -900,17 +917,17 @@ def _update_usage_from_instance(self, context, instance, nodename, instance, sign=sign) self.scheduler_client.reportclient.update_instance_allocation( - self.compute_node, instance, sign) + cn, instance, sign) # new instance, update compute node resource usage: self._update_usage(self._get_usage_dict(instance), nodename, sign=sign) - self.compute_node.current_workload = self.stats.calculate_workload() + cn.current_workload = self.stats.calculate_workload() if self.pci_tracker: obj = self.pci_tracker.stats.to_device_pools_obj() - self.compute_node.pci_device_pools = obj + cn.pci_device_pools = obj else: - self.compute_node.pci_device_pools = objects.PciDevicePoolList() + cn.pci_device_pools = objects.PciDevicePoolList() def _update_usage_from_instances(self, context, instances, nodename): """Calculate resource usage based on instance utilization. This is @@ -920,25 +937,24 @@ def _update_usage_from_instances(self, context, instances, nodename): """ self.tracked_instances.clear() + cn = self.compute_nodes[nodename] # set some initial values, reserve room for host/hypervisor: - self.compute_node.local_gb_used = CONF.reserved_host_disk_mb / 1024 - self.compute_node.memory_mb_used = CONF.reserved_host_memory_mb - self.compute_node.vcpus_used = 0 - self.compute_node.free_ram_mb = (self.compute_node.memory_mb - - self.compute_node.memory_mb_used) - self.compute_node.free_disk_gb = (self.compute_node.local_gb - - self.compute_node.local_gb_used) - self.compute_node.current_workload = 0 - self.compute_node.running_vms = 0 + cn.local_gb_used = CONF.reserved_host_disk_mb / 1024 + cn.memory_mb_used = CONF.reserved_host_memory_mb + cn.vcpus_used = 0 + cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used) + cn.free_disk_gb = (cn.local_gb - cn.local_gb_used) + cn.current_workload = 0 + cn.running_vms = 0 for instance in instances: if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL: self._update_usage_from_instance(context, instance, nodename) self.scheduler_client.reportclient.remove_deleted_instances( - self.compute_node, self.tracked_instances.values()) - self.compute_node.free_ram_mb = max(0, self.compute_node.free_ram_mb) - self.compute_node.free_disk_gb = max(0, self.compute_node.free_disk_gb) + cn, self.tracked_instances.values()) + cn.free_ram_mb = max(0, cn.free_ram_mb) + cn.free_disk_gb = max(0, cn.free_disk_gb) def _find_orphaned_instances(self): """Given the set of instances and migrations already account for diff --git a/nova/tests/functional/test_compute_mgr.py b/nova/tests/functional/test_compute_mgr.py index 155f582f39d..f2808f83b79 100644 --- a/nova/tests/functional/test_compute_mgr.py +++ b/nova/tests/functional/test_compute_mgr.py @@ -56,13 +56,17 @@ def test_instance_fault_message_no_traceback_with_retry(self): project_id='fake') instance.create() + # Amongst others, mock the resource tracker, otherwise it will 
+ # not have been sufficiently initialized and will raise a KeyError + # on the self.compute_nodes dict after the TestingException. @mock.patch.object(self.conductor.manager.compute_task_mgr, '_cleanup_allocated_networks') @mock.patch.object(self.compute.manager.network_api, 'cleanup_instance_network_on_host') @mock.patch('nova.compute.utils.notify_about_instance_usage') + @mock.patch.object(self.compute.manager, '_get_resource_tracker') @mock.patch.object(self.compute.manager.driver, 'spawn') - def _test(mock_spawn, mock_notify, mock_cinoh, mock_can): + def _test(mock_spawn, mock_grt, mock_notify, mock_cinoh, mock_can): mock_spawn.side_effect = test.TestingException('Preserve this') # Simulate that we're on the last retry attempt filter_properties = {'retry': {'num_attempts': 3}} diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py index 6bd25e34a67..55fa959fdda 100644 --- a/nova/tests/unit/compute/test_claims.py +++ b/nova/tests/unit/compute/test_claims.py @@ -28,6 +28,8 @@ from nova.tests.unit import fake_instance from nova.tests.unit.pci import fakes as pci_fakes +_NODENAME = 'fake-node' + class FakeResourceHandler(object): test_called = False @@ -91,9 +93,9 @@ def _claim(self, limits=None, overhead=None, requests=None, **kwargs): @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=db_numa_topology) def get_claim(mock_extra_get): - return claims.Claim(self.context, instance, self.tracker, - self.resources, requests, overhead=overhead, - limits=limits) + return claims.Claim(self.context, instance, _NODENAME, + self.tracker, self.resources, requests, + overhead=overhead, limits=limits) return get_claim() def _fake_instance(self, **kwargs): @@ -409,10 +411,10 @@ def _claim(self, limits=None, overhead=None, requests=None, @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=self.db_numa_topology) def get_claim(mock_extra_get, mock_numa_get): - return claims.MoveClaim(self.context, self.instance, instance_type, - image_meta, self.tracker, self.resources, - requests, overhead=overhead, - limits=limits) + return claims.MoveClaim(self.context, self.instance, _NODENAME, + instance_type, image_meta, self.tracker, + self.resources, requests, + overhead=overhead, limits=limits) return get_claim() @mock.patch('nova.objects.Instance.drop_migration_context') diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index a71a9e38f57..023bf10c035 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -157,8 +157,8 @@ def setUp(self): # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, - self.compute.driver, NODENAME) - self.compute._resource_tracker_dict[NODENAME] = fake_rt + self.compute.driver) + self.compute._resource_tracker = fake_rt def fake_get_compute_nodes_in_db(self, context, use_slave=False): fake_compute_nodes = [{'local_gb': 259, @@ -255,7 +255,7 @@ def fake_allocate_for_instance(cls, ctxt, instance, self.compute_api = compute.API() # Just to make long lines short - self.rt = self.compute._get_resource_tracker(NODENAME) + self.rt = self.compute._get_resource_tracker() def tearDown(self): ctxt = context.get_admin_context() @@ -1467,7 +1467,8 @@ def test_create_instance_unlimited_memory(self): self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) - self.assertEqual(999999999999, 
self.rt.compute_node.memory_mb_used) + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(999999999999, cn.memory_mb_used) def test_create_instance_unlimited_disk(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) @@ -1488,24 +1489,26 @@ def test_create_multiple_instances_then_starve(self): instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) - self.assertEqual(1024, self.rt.compute_node.memory_mb_used) - self.assertEqual(256, self.rt.compute_node.local_gb_used) + + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(1024, cn.memory_mb_used) + self.assertEqual(256, cn.local_gb_used) params = {"flavor": {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) - self.assertEqual(3072, self.rt.compute_node.memory_mb_used) - self.assertEqual(768, self.rt.compute_node.local_gb_used) + self.assertEqual(3072, cn.memory_mb_used) + self.assertEqual(768, cn.local_gb_used) params = {"flavor": {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) - self.assertEqual(3072, self.rt.compute_node.memory_mb_used) - self.assertEqual(768, self.rt.compute_node.local_gb_used) + self.assertEqual(3072, cn.memory_mb_used) + self.assertEqual(768, cn.local_gb_used) def test_create_multiple_instance_with_neutron_port(self): instance_type = flavors.get_default_flavor() @@ -1547,7 +1550,8 @@ def test_create_instance_with_oversubscribed_ram(self): self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) - self.assertEqual(instance_mb, self.rt.compute_node.memory_mb_used) + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(instance_mb, cn.memory_mb_used) def test_create_instance_with_oversubscribed_ram_fail(self): """Test passing of oversubscribed ram policy from the scheduler, but @@ -1594,7 +1598,8 @@ def test_create_instance_with_oversubscribed_cpu(self): self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) - self.assertEqual(2, self.rt.compute_node.vcpus_used) + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(2, cn.vcpus_used) # create one more instance: params = {"flavor": {"memory_mb": 10, "root_gb": 1, @@ -1603,13 +1608,13 @@ def test_create_instance_with_oversubscribed_cpu(self): self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) - self.assertEqual(3, self.rt.compute_node.vcpus_used) + self.assertEqual(3, cn.vcpus_used) # delete the instance: instance['vm_state'] = vm_states.DELETED self.rt.update_usage(self.context, instance, NODENAME) - self.assertEqual(2, self.rt.compute_node.vcpus_used) + self.assertEqual(2, cn.vcpus_used) # now oversubscribe vcpus and fail: params = {"flavor": {"memory_mb": 10, "root_gb": 1, @@ -1644,7 +1649,8 @@ def test_create_instance_with_oversubscribed_disk(self): self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) - self.assertEqual(instance_gb, self.rt.compute_node.local_gb_used) + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(instance_gb, cn.local_gb_used) def 
test_create_instance_with_oversubscribed_disk_fail(self): """Test passing of oversubscribed disk policy from the scheduler, but @@ -5240,7 +5246,7 @@ def fake_confirm_migration_driver(*args, **kwargs): reservations = list('fake_res') # Get initial memory usage - memory_mb_used = self.rt.compute_node.memory_mb_used + memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) @@ -5252,7 +5258,7 @@ def fake_confirm_migration_driver(*args, **kwargs): self.assertEqual(flavor.flavorid, '1') # Memory usage should have increased by the claim - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb) instance.vm_state = old_vm_state @@ -5268,7 +5274,7 @@ def fake_confirm_migration_driver(*args, **kwargs): filter_properties={}, node=None, clean_shutdown=True) # Memory usage should increase after the resize as well - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb + new_instance_type_ref.memory_mb) @@ -5297,7 +5303,7 @@ def fake_confirm_migration_driver(*args, **kwargs): disk_info={}, image={}, instance=instance) # Memory usage shouldn't had changed - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb + new_instance_type_ref.memory_mb) @@ -5318,7 +5324,7 @@ def fake_confirm_migration_driver(*args, **kwargs): # Resources from the migration (based on initial flavor) should # be freed now - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + new_instance_type_ref.memory_mb) instance.refresh() @@ -5408,7 +5414,8 @@ def test_confirm_resize_with_numa_topology_and_cpu_pinning(self): self.rt.tracked_migrations[instance.uuid] = (migration, instance.flavor) - self.rt.compute_node.numa_topology = jsonutils.dumps( + cn = self.rt.compute_nodes[NODENAME] + cn.numa_topology = jsonutils.dumps( host_numa_topology.obj_to_primitive()) with mock.patch.object(self.compute.network_api, @@ -5419,7 +5426,7 @@ def test_confirm_resize_with_numa_topology_and_cpu_pinning(self): self.assertEqual(vm_states.ACTIVE, instance['vm_state']) updated_topology = objects.NUMATopology.obj_from_primitive( - jsonutils.loads(self.rt.compute_node.numa_topology)) + jsonutils.loads(cn.numa_topology)) # after confirming resize all cpus on currect host must be free self.assertEqual(2, len(updated_topology.cells)) @@ -5526,7 +5533,7 @@ def fake_finish_revert_migration_driver(*args, **kwargs): reservations = list('fake_res') # Get initial memory usage - memory_mb_used = self.rt.compute_node.memory_mb_used + memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) @@ -5537,7 +5544,7 @@ def fake_finish_revert_migration_driver(*args, **kwargs): self.assertEqual(flavor.flavorid, '1') # Memory usage should have increased by the claim - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb) old_vm_state = instance['vm_state'] @@ -5555,7 +5562,7 @@ def fake_finish_revert_migration_driver(*args, **kwargs): clean_shutdown=True) # Memory usage should increase after the 
resize as well - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb + new_instance_type_ref.memory_mb) @@ -5583,7 +5590,7 @@ def fake_finish_revert_migration_driver(*args, **kwargs): disk_info={}, image={}, instance=instance) # Memory usage shouldn't had changed - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb + new_instance_type_ref.memory_mb) @@ -5603,7 +5610,7 @@ def fake_finish_revert_migration_driver(*args, **kwargs): # Resources from the migration (based on initial flavor) should # be freed now - self.assertEqual(self.rt.compute_node.memory_mb_used, + self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used, memory_mb_used + flavor.memory_mb) instance.refresh() @@ -6954,29 +6961,17 @@ def test_instance_build_timeout_mixed_instances(self): conductor_instance_update.assert_has_calls([ mock.call()]) - def test_get_resource_tracker_fail(self): - self.assertRaises(exception.NovaException, - self.compute._get_resource_tracker, - 'invalidnodename') - @mock.patch.object(objects.Instance, 'save') def test_instance_update_host_check(self, mock_save): # make sure rt usage doesn't happen if the host or node is different - def fail_get(self, nodename): - raise test.TestingException("wrong host/node") + def fail_get(self): + raise test.TestingException("wrong host") self.stub_out('nova.compute.manager.ComputeManager.' '_get_resource_tracker', fail_get) instance = self._create_fake_instance_obj({'host': 'someotherhost'}) self.compute._instance_update(self.context, instance, vcpus=4) - instance = self._create_fake_instance_obj({'node': 'someothernode'}) - self.compute._instance_update(self.context, instance, vcpus=4) - - params = {'host': 'someotherhost', 'node': 'someothernode'} - instance = self._create_fake_instance_obj(params) - self.compute._instance_update(self.context, instance, vcpus=4) - @mock.patch.object(compute_manager.ComputeManager, '_get_instances_on_driver') @mock.patch.object(compute_manager.ComputeManager, @@ -7176,18 +7171,19 @@ def fake_destroy(self): self.assertNotEqual(0, instance.deleted) def test_terminate_instance_updates_tracker(self): - rt = self.compute._get_resource_tracker(NODENAME) + rt = self.compute._get_resource_tracker() admin_context = context.get_admin_context() - self.assertEqual(0, rt.compute_node.vcpus_used) + cn = self.rt.compute_nodes[NODENAME] + self.assertEqual(0, cn.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 rt.instance_claim(admin_context, instance, NODENAME) - self.assertEqual(1, rt.compute_node.vcpus_used) + self.assertEqual(1, cn.vcpus_used) self.compute.terminate_instance(admin_context, instance, [], []) - self.assertEqual(0, rt.compute_node.vcpus_used) + self.assertEqual(0, cn.vcpus_used) @mock.patch('nova.compute.manager.ComputeManager' '._notify_about_instance_usage') @@ -7197,23 +7193,24 @@ def test_terminate_instance_updates_tracker(self): # update properly. 
@mock.patch('nova.objects.Instance.destroy') def test_init_deleted_instance_updates_tracker(self, noop1, noop2, noop3): - rt = self.compute._get_resource_tracker(NODENAME) + rt = self.compute._get_resource_tracker() admin_context = context.get_admin_context() - self.assertEqual(0, rt.compute_node.vcpus_used) + cn = rt.compute_nodes[NODENAME] + self.assertEqual(0, cn.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 - self.assertEqual(0, rt.compute_node.vcpus_used) + self.assertEqual(0, cn.vcpus_used) rt.instance_claim(admin_context, instance, NODENAME) self.compute._init_instance(admin_context, instance) - self.assertEqual(1, rt.compute_node.vcpus_used) + self.assertEqual(1, cn.vcpus_used) instance.vm_state = vm_states.DELETED self.compute._init_instance(admin_context, instance) - self.assertEqual(0, rt.compute_node.vcpus_used) + self.assertEqual(0, cn.vcpus_used) def test_init_instance_for_partial_deletion(self): admin_context = context.get_admin_context() @@ -7515,9 +7512,6 @@ def test_allow_confirm_resize_on_instance_in_deleting_task_state(self): def fake_drop_move_claim(*args, **kwargs): pass - def fake_get_resource_tracker(self): - return fake_rt - def fake_setup_networks_on_host(self, *args, **kwargs): pass @@ -7525,7 +7519,7 @@ def fake_setup_networks_on_host(self, *args, **kwargs): mock.patch.object(fake_rt, 'drop_move_claim', side_effect=fake_drop_move_claim), mock.patch.object(self.compute, '_get_resource_tracker', - side_effect=fake_get_resource_tracker), + return_value=fake_rt), mock.patch.object(self.compute.network_api, 'setup_networks_on_host', side_effect=fake_setup_networks_on_host) @@ -11517,7 +11511,7 @@ def setUp(self): self.inst.save() def fake_get_compute_info(cls, context, host): - cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) + cn = objects.ComputeNode(hypervisor_hostname=NODENAME) return cn self.stub_out('nova.compute.manager.ComputeManager._get_compute_info', @@ -11565,7 +11559,7 @@ def test_rebuild_on_host_updated_target(self): def fake_get_compute_info(context, host): self.assertTrue(context.is_admin) self.assertEqual('fake-mini', host) - cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) + cn = objects.ComputeNode(hypervisor_hostname=NODENAME) return cn with test.nested( @@ -11821,7 +11815,7 @@ def test_rebuild_migration_claim_fails(self): patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( - self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim', + self.compute._resource_tracker, 'rebuild_claim', side_effect=exception.ComputeResourcesUnavailable(reason="boom")) with patch_spawn, patch_on_disk, patch_claim: self.assertRaises(exception.BuildAbortException, @@ -11837,7 +11831,7 @@ def test_rebuild_fails_migration_failed(self): patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( - self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim') + self.compute._resource_tracker, 'rebuild_claim') patch_rebuild = mock.patch.object( self.compute, '_do_rebuild_instance_with_claim', side_effect=test.TestingException()) diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index 918592659dc..90fa76d6fed 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -172,80 +172,62 @@ def _make_compute_node(self, hyp_hostname, cn_id): cn.hypervisor_hostname = 
hyp_hostname return cn - def _make_rt(self, node): - n = mock.Mock(spec_set=['update_available_resource', - 'nodename']) - n.nodename = node - return n - @mock.patch.object(manager.ComputeManager, '_get_resource_tracker') - @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes') - @mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db') - def test_update_available_resource_for_node( - self, get_db_nodes, get_avail_nodes, get_rt): - db_nodes = [] - - db_nodes = [self._make_compute_node('node%s' % i, i) - for i in range(1, 5)] - avail_nodes = set(['node2', 'node3', 'node4', 'node5']) - avail_nodes_l = list(avail_nodes) - rts = [self._make_rt(node) for node in avail_nodes_l] - # Make the 2nd and 3rd ones raise - exc = exception.ComputeHostNotFound(host=uuids.fake_host) - rts[1].update_available_resource.side_effect = exc - exc = test.TestingException() - rts[2].update_available_resource.side_effect = exc - get_db_nodes.return_value = db_nodes - get_avail_nodes.return_value = avail_nodes - get_rt.side_effect = rts + def test_update_available_resource_for_node(self, get_rt): + rt = mock.Mock(spec_set=['update_available_resource']) + get_rt.return_value = rt - self.compute.update_available_resource_for_node(self.context, - avail_nodes_l[0]) - self.assertEqual(self.compute._resource_tracker_dict[avail_nodes_l[0]], - rts[0]) - - # Update ComputeHostNotFound - self.compute.update_available_resource_for_node(self.context, - avail_nodes_l[1]) - self.assertNotIn(self.compute._resource_tracker_dict, avail_nodes_l[1]) - - # Update TestException - self.compute.update_available_resource_for_node(self.context, - avail_nodes_l[2]) - self.assertEqual(self.compute._resource_tracker_dict[ - avail_nodes_l[2]], rts[2]) + self.compute.update_available_resource_for_node( + self.context, + mock.sentinel.node, + ) + rt.update_available_resource.assert_called_once_with( + self.context, + mock.sentinel.node, + ) + @mock.patch('nova.compute.manager.LOG') @mock.patch.object(manager.ComputeManager, '_get_resource_tracker') + def test_update_available_resource_for_node_fail_no_host(self, get_rt, + log_mock): + rt = mock.Mock(spec_set=['update_available_resource']) + exc = exception.ComputeHostNotFound(host=mock.sentinel.host) + rt.update_available_resource.side_effect = exc + get_rt.return_value = rt + # Fake out the RT on the compute manager object so we can assert it's + # nulled out after the ComputeHostNotFound exception is raised. 
+ self.compute._resource_tracker = rt + + self.compute.update_available_resource_for_node( + self.context, + mock.sentinel.node, + ) + rt.update_available_resource.assert_called_once_with( + self.context, + mock.sentinel.node, + ) + self.assertTrue(log_mock.info.called) + self.assertIsNone(self.compute._resource_tracker) + + @mock.patch.object(manager.ComputeManager, + 'update_available_resource_for_node') @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes') @mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db') def test_update_available_resource(self, get_db_nodes, get_avail_nodes, - get_rt): + update_mock): db_nodes = [self._make_compute_node('node%s' % i, i) for i in range(1, 5)] avail_nodes = set(['node2', 'node3', 'node4', 'node5']) avail_nodes_l = list(avail_nodes) - rts = [self._make_rt(node) for node in avail_nodes_l] - # Make the 2nd and 3rd ones raise - exc = exception.ComputeHostNotFound(host='fake') - rts[1].update_available_resource.side_effect = exc - exc = test.TestingException() - rts[2].update_available_resource.side_effect = exc - expected_rt_dict = {avail_nodes_l[0]: rts[0], - avail_nodes_l[2]: rts[2], - avail_nodes_l[3]: rts[3]} get_db_nodes.return_value = db_nodes get_avail_nodes.return_value = avail_nodes - get_rt.side_effect = rts self.compute.update_available_resource(self.context) get_db_nodes.assert_called_once_with(self.context, use_slave=True) - self.assertEqual(sorted([mock.call(node) for node in avail_nodes]), - sorted(get_rt.call_args_list)) - for rt, node in zip(rts, avail_nodes_l): - rt.update_available_resource.assert_called_once_with(self.context, - node) - self.assertEqual(expected_rt_dict, - self.compute._resource_tracker_dict) + update_mock.has_calls( + [mock.call(self.context, node) for node in avail_nodes_l] + ) + # First node in set should have been removed from DB for db_node in db_nodes: if db_node.hypervisor_hostname == 'node1': @@ -3379,8 +3361,8 @@ def fake_network_info(): # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, - self.compute.driver, self.node) - self.compute._resource_tracker_dict[self.node] = fake_rt + self.compute.driver) + self.compute._resource_tracker = fake_rt def _do_build_instance_update(self, mock_save, reschedule_update=False): mock_save.return_value = self.instance @@ -4159,7 +4141,7 @@ def test_reschedule_on_resources_unavailable(self, mock_get_resource, self._instance_action_events(mock_start, mock_finish) self._assert_build_instance_update(mock_save, reschedule_update=True) - mock_get_resource.assert_called_once_with(self.node) + mock_get_resource.assert_called_once_with() mock_notify.assert_has_calls([ mock.call(self.context, self.instance, 'create.start', extra_usage_info= {'image_name': self.image.get('name')}), @@ -4823,7 +4805,7 @@ def do_test(notify_about_instance_usage, do_test() def test_finish_revert_resize_migration_context(self): - fake_rt = resource_tracker.ResourceTracker(None, None, None) + fake_rt = resource_tracker.ResourceTracker(None, None) fake_rt.tracked_migrations[self.instance['uuid']] = ( self.migration, None) diff --git a/nova/tests/unit/compute/test_multiple_nodes.py b/nova/tests/unit/compute/test_multiple_nodes.py deleted file mode 100644 index a3f4d3c5e1d..00000000000 --- a/nova/tests/unit/compute/test_multiple_nodes.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for compute service with multiple compute nodes.""" - -from nova.compute import manager -import nova.conf -from nova import context -from nova import objects -from nova import test -from nova.tests import uuidsentinel -from nova.virt import fake - - -CONF = nova.conf.CONF - - -class BaseTestCase(test.TestCase): - def tearDown(self): - fake.restore_nodes() - super(BaseTestCase, self).tearDown() - - -class FakeDriverSingleNodeTestCase(BaseTestCase): - def setUp(self): - super(FakeDriverSingleNodeTestCase, self).setUp() - self.driver = fake.FakeDriver(virtapi=None) - fake.set_nodes(['xyz']) - - def test_get_available_resource(self): - res = self.driver.get_available_resource('xyz') - self.assertEqual(res['hypervisor_hostname'], 'xyz') - - -class FakeDriverMultiNodeTestCase(BaseTestCase): - def setUp(self): - super(FakeDriverMultiNodeTestCase, self).setUp() - self.driver = fake.FakeDriver(virtapi=None) - fake.set_nodes(['aaa', 'bbb']) - - def test_get_available_resource(self): - res_a = self.driver.get_available_resource('aaa') - self.assertEqual(res_a['hypervisor_hostname'], 'aaa') - - res_b = self.driver.get_available_resource('bbb') - self.assertEqual(res_b['hypervisor_hostname'], 'bbb') - - res_x = self.driver.get_available_resource('xxx') - self.assertEqual(res_x, {}) - - -class MultiNodeComputeTestCase(BaseTestCase): - def setUp(self): - super(MultiNodeComputeTestCase, self).setUp() - self.flags(compute_driver='fake.FakeDriver') - self.compute = manager.ComputeManager() - - def fake_get_compute_nodes_in_db(context, use_slave=False): - fake_compute_nodes = [{'local_gb': 259, - 'uuid': uuidsentinel.fake_compute, - 'vcpus_used': 0, - 'deleted': 0, - 'hypervisor_type': 'powervm', - 'created_at': '2013-04-01T00:27:06.000000', - 'local_gb_used': 0, - 'updated_at': '2013-04-03T00:35:41.000000', - 'hypervisor_hostname': 'fake_phyp1', - 'memory_mb_used': 512, - 'memory_mb': 131072, - 'current_workload': 0, - 'vcpus': 16, - 'cpu_info': 'ppc64,powervm,3940', - 'running_vms': 0, - 'free_disk_gb': 259, - 'service_id': 7, - 'hypervisor_version': 7, - 'disk_available_least': 265856, - 'deleted_at': None, - 'free_ram_mb': 130560, - 'metrics': '', - 'numa_topology': '', - 'stats': '', - 'id': 2, - 'host': 'fake_phyp1', - 'cpu_allocation_ratio': None, - 'ram_allocation_ratio': None, - 'disk_allocation_ratio': None, - 'host_ip': '127.0.0.1'}] - return [objects.ComputeNode._from_db_object( - context, objects.ComputeNode(), cn) - for cn in fake_compute_nodes] - - def fake_compute_node_delete(context, compute_node_id): - self.assertEqual(2, compute_node_id) - - self.stubs.Set(self.compute, '_get_compute_nodes_in_db', - fake_get_compute_nodes_in_db) - self.stub_out('nova.db.compute_node_delete', - fake_compute_node_delete) - - def test_update_available_resource_add_remove_node(self): - ctx = context.get_admin_context() - fake.set_nodes(['A', 'B', 'C']) - self.compute.update_available_resource(ctx) - 
self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), - ['A', 'B', 'C']) - - fake.set_nodes(['A', 'B']) - self.compute.update_available_resource(ctx) - self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), - ['A', 'B']) - - fake.set_nodes(['A', 'B', 'C']) - self.compute.update_available_resource(ctx) - self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), - ['A', 'B', 'C']) - - def test_compute_manager_removes_deleted_node(self): - ctx = context.get_admin_context() - fake.set_nodes(['A', 'B']) - - fake_compute_nodes = [ - objects.ComputeNode( - context=ctx, hypervisor_hostname='A', id=2), - objects.ComputeNode( - context=ctx, hypervisor_hostname='B', id=3), - ] - - def fake_get_compute_nodes_in_db(context, use_slave=False): - return fake_compute_nodes - - def fake_compute_node_delete(context, compute_node_id): - for cn in fake_compute_nodes: - if compute_node_id == cn.id: - fake_compute_nodes.remove(cn) - return - - self.stubs.Set(self.compute, '_get_compute_nodes_in_db', - fake_get_compute_nodes_in_db) - self.stub_out('nova.db.compute_node_delete', - fake_compute_node_delete) - - self.compute.update_available_resource(ctx) - - # Verify nothing is deleted if driver and db compute nodes match - self.assertEqual(len(fake_compute_nodes), 2) - self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), - ['A', 'B']) - - fake.set_nodes(['A']) - self.compute.update_available_resource(ctx) - - # Verify B gets deleted since now only A is reported by driver - self.assertEqual(len(fake_compute_nodes), 1) - self.assertEqual(fake_compute_nodes[0].hypervisor_hostname, 'A') - self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), - ['A']) diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index 2113f4d4119..1d6de53a4f0 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -65,7 +65,7 @@ local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'], hypervisor_type='fake', hypervisor_version=0, - hypervisor_hostname=_HOSTNAME, + hypervisor_hostname=_NODENAME, free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] - _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']), free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] - @@ -406,7 +406,7 @@ def overhead_zero(instance): } -def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, +def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): """Sets up the resource tracker instance with mock fixtures. 
@@ -430,7 +430,7 @@ def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, mock.patch('nova.scheduler.client.SchedulerClient', return_value=sched_client_mock), mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)): - rt = resource_tracker.ResourceTracker(hostname, vd, nodename) + rt = resource_tracker.ResourceTracker(hostname, vd) return (rt, sched_client_mock, vd) @@ -456,7 +456,7 @@ def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): (self.rt, self.sched_client_mock, self.driver_mock) = setup_rt( - _HOSTNAME, _NODENAME, virt_resources, estimate_overhead) + _HOSTNAME, virt_resources, estimate_overhead) class TestUpdateAvailableResources(BaseTestCase): @@ -471,6 +471,36 @@ def _update_available_resources(self): self.rt.update_available_resource(mock.sentinel.ctx, _NODENAME) return update_mock + @mock.patch('nova.objects.InstancePCIRequests.get_by_instance', + return_value=objects.InstancePCIRequests(requests=[])) + @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', + return_value=objects.PciDeviceList()) + @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') + @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') + @mock.patch('nova.objects.InstanceList.get_by_host_and_node') + def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock, + instance_pci_mock): + self._setup_rt() + + # Set up resource tracker in an enabled state and verify that all is + # good before simulating a disabled node. + get_mock.return_value = [] + migr_mock.return_value = [] + get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] + + update_mock = self._update_available_resources() + + self.assertTrue(update_mock.called) + + update_mock.reset_mock() + + # OK, now simulate a node being disabled by the Ironic virt driver. 
+ vd = self.driver_mock + vd.node_is_available.return_value = False + update_mock = self._update_available_resources() + + self.assertFalse(update_mock.called) + @mock.patch('nova.objects.InstancePCIRequests.get_by_instance', return_value=objects.InstancePCIRequests(requests=[])) @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', @@ -935,7 +965,7 @@ def test_no_op_init_compute_node(self, get_mock, service_mock, resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) - self.rt.compute_node = compute_node + self.rt.compute_nodes[_NODENAME] = compute_node self.rt._init_compute_node(mock.sentinel.ctx, resources) @@ -943,7 +973,6 @@ def test_no_op_init_compute_node(self, get_mock, service_mock, self.assertFalse(get_mock.called) self.assertFalse(create_mock.called) self.assertTrue(pci_mock.called) - self.assertFalse(self.rt.disabled) self.assertTrue(self.sched_client_mock.update_resource_stats.called) @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', @@ -966,7 +995,6 @@ def fake_get_node(_ctx, host, node): get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME, _NODENAME) self.assertFalse(create_mock.called) - self.assertFalse(self.rt.disabled) self.assertTrue(self.sched_client_mock.update_resource_stats.called) @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', @@ -1032,17 +1060,17 @@ def set_cn_id(): # The PCI tracker needs the compute node's ID when starting up, so # make sure that we set the ID value so we don't get a Cannot load # 'id' in base class error - self.rt.compute_node.id = 42 # Has to be a number, not a mock + cn = self.rt.compute_nodes[_NODENAME] + cn.id = 42 # Has to be a number, not a mock create_mock.side_effect = set_cn_id self.rt._init_compute_node(mock.sentinel.ctx, resources) - self.assertFalse(self.rt.disabled) + cn = self.rt.compute_nodes[_NODENAME] get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME, _NODENAME) create_mock.assert_called_once_with() - self.assertTrue(obj_base.obj_equal_prims(expected_compute, - self.rt.compute_node)) + self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn)) pci_tracker_mock.assert_called_once_with(mock.sentinel.ctx, 42) self.assertTrue(self.sched_client_mock.update_resource_stats.called) @@ -1058,8 +1086,8 @@ def test_existing_compute_node_updated_same_resources(self, service_mock): # are checking below to see that update_resource_stats() is not # needlessly called when the resources don't actually change. orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone() - self.rt.compute_node = orig_compute - self.rt.old_resources = orig_compute + self.rt.compute_nodes[_NODENAME] = orig_compute + self.rt.old_resources[_NODENAME] = orig_compute new_compute = orig_compute.obj_clone() @@ -1076,8 +1104,8 @@ def test_existing_compute_node_updated_new_resources(self, service_mock): self._setup_rt() orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone() - self.rt.compute_node = orig_compute - self.rt.old_resources = orig_compute + self.rt.compute_nodes[_NODENAME] = orig_compute + self.rt.old_resources[_NODENAME] = orig_compute # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used # below to be different from the compute node fixture's base usages. 
@@ -1100,7 +1128,8 @@ def setUp(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self._setup_rt() - self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) + cn = _COMPUTE_NODE_FIXTURES[0].obj_clone() + self.rt.compute_nodes[_NODENAME] = cn # not using mock.sentinel.ctx because instance_claim calls #elevated self.ctx = mock.MagicMock() @@ -1132,8 +1161,8 @@ def assertEqualNUMAHostTopology(self, expected, got): {'expected': expected, 'got': got}) def test_claim_disabled(self): - self.rt.compute_node = None - self.assertTrue(self.rt.disabled) + self.rt.compute_nodes = {} + self.assertTrue(self.rt.disabled(_NODENAME)) with mock.patch.object(self.instance, 'save'): claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance, @@ -1153,8 +1182,8 @@ def test_update_usage_with_claim(self, migr_mock, pci_mock): expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.update_usage(self.ctx, self.instance, _NODENAME) - self.assertTrue(obj_base.obj_equal_prims(expected, - self.rt.compute_node)) + cn = self.rt.compute_nodes[_NODENAME] + self.assertTrue(obj_base.obj_equal_prims(expected, cn)) disk_used = self.instance.root_gb + self.instance.ephemeral_gb vals = { @@ -1179,7 +1208,7 @@ def test_update_usage_with_claim(self, migr_mock, pci_mock): with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, _NODENAME, None) - cn = self.rt.compute_node + cn = self.rt.compute_nodes[_NODENAME] update_mock.assert_called_once_with(self.elevated, cn) self.assertTrue(obj_base.obj_equal_prims(expected, cn)) @@ -1214,7 +1243,7 @@ def test_update_usage_removed(self, migr_mock, pci_mock): with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, _NODENAME, None) - cn = self.rt.compute_node + cn = self.rt.compute_nodes[_NODENAME] update_mock.assert_called_once_with(self.elevated, cn) self.assertTrue(obj_base.obj_equal_prims(expected, cn)) @@ -1235,14 +1264,12 @@ def test_update_usage_removed(self, migr_mock, pci_mock): self.instance.vm_state = vm_states.SHELVED_OFFLOADED with mock.patch.object(self.rt, '_update') as update_mock: self.rt.update_usage(self.ctx, self.instance, _NODENAME) - self.assertTrue(obj_base.obj_equal_prims(expected_updated, - self.rt.compute_node)) + cn = self.rt.compute_nodes[_NODENAME] + self.assertTrue(obj_base.obj_equal_prims(expected_updated, cn)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim(self, migr_mock, pci_mock): - self.assertFalse(self.rt.disabled) - pci_mock.return_value = objects.InstancePCIRequests(requests=[]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb @@ -1269,7 +1296,7 @@ def test_claim(self, migr_mock, pci_mock): with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, _NODENAME, None) - cn = self.rt.compute_node + cn = self.rt.compute_nodes[_NODENAME] update_mock.assert_called_once_with(self.elevated, cn) self.assertTrue(obj_base.obj_equal_prims(expected, cn)) @@ -1285,7 +1312,6 @@ def test_claim_with_pci(self, migr_mock, pci_mock, pci_stats_mock): # Test that a claim involving PCI requests correctly claims # PCI devices on the host and sends an updated pci_device_pools # attribute of the ComputeNode object. - self.assertFalse(self.rt.disabled) # TODO(jaypipes): Remove once the PCI tracker is always created # upon the resource tracker being initialized... 
@@ -1327,7 +1353,7 @@ def test_claim_with_pci(self, migr_mock, pci_mock, pci_stats_mock): with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, _NODENAME, None) - cn = self.rt.compute_node + cn = self.rt.compute_nodes[_NODENAME] update_mock.assert_called_once_with(self.elevated, cn) pci_stats_mock.assert_called_once_with([request]) self.assertTrue(obj_base.obj_equal_prims(expected, cn)) @@ -1337,9 +1363,10 @@ def test_claim_with_pci(self, migr_mock, pci_mock, pci_stats_mock): def test_claim_abort_context_manager(self, migr_mock, pci_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) - self.assertEqual(0, self.rt.compute_node.local_gb_used) - self.assertEqual(0, self.rt.compute_node.memory_mb_used) - self.assertEqual(0, self.rt.compute_node.running_vms) + cn = self.rt.compute_nodes[_NODENAME] + self.assertEqual(0, cn.local_gb_used) + self.assertEqual(0, cn.memory_mb_used) + self.assertEqual(0, cn.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @@ -1366,9 +1393,9 @@ def _doit(mock_clone): # Assert that the resources claimed by the Claim() constructor # are returned to the resource tracker due to the claim's abort() # method being called when triggered by the exception raised above. - self.assertEqual(0, self.rt.compute_node.local_gb_used) - self.assertEqual(0, self.rt.compute_node.memory_mb_used) - self.assertEqual(0, self.rt.compute_node.running_vms) + self.assertEqual(0, cn.local_gb_used) + self.assertEqual(0, cn.memory_mb_used) + self.assertEqual(0, cn.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @@ -1383,11 +1410,13 @@ def _claim(mock_save, mock_clone): return self.rt.instance_claim(self.ctx, self.instance, _NODENAME, None) + cn = self.rt.compute_nodes[_NODENAME] + claim = _claim() - self.assertEqual(disk_used, self.rt.compute_node.local_gb_used) + self.assertEqual(disk_used, cn.local_gb_used) self.assertEqual(self.instance.memory_mb, - self.rt.compute_node.memory_mb_used) - self.assertEqual(1, self.rt.compute_node.running_vms) + cn.memory_mb_used) + self.assertEqual(1, cn.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @@ -1404,15 +1433,13 @@ def _abort(): self.assertIsNone(self.instance.host) self.assertIsNone(self.instance.node) - self.assertEqual(0, self.rt.compute_node.local_gb_used) - self.assertEqual(0, self.rt.compute_node.memory_mb_used) - self.assertEqual(0, self.rt.compute_node.running_vms) + self.assertEqual(0, cn.local_gb_used) + self.assertEqual(0, cn.memory_mb_used) + self.assertEqual(0, cn.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_limits(self, migr_mock, pci_mock): - self.assertFalse(self.rt.disabled) - pci_mock.return_value = objects.InstancePCIRequests(requests=[]) good_limits = { @@ -1431,13 +1458,12 @@ def test_claim_limits(self, migr_mock, pci_mock): @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_numa(self, migr_mock, pci_mock): - self.assertFalse(self.rt.disabled) - pci_mock.return_value = objects.InstancePCIRequests(requests=[]) + cn = self.rt.compute_nodes[_NODENAME] self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb'] host_topology = _NUMA_HOST_TOPOLOGIES['2mb'] - 
self.rt.compute_node.numa_topology = host_topology._to_json() + cn.numa_topology = host_topology._to_json() limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']} expected_numa = copy.deepcopy(host_topology) @@ -1448,10 +1474,8 @@ def test_claim_numa(self, migr_mock, pci_mock): with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, _NODENAME, limits) - updated_compute_node = self.rt.compute_node - update_mock.assert_called_once_with(self.ctx.elevated(), - updated_compute_node) - new_numa = updated_compute_node.numa_topology + update_mock.assert_called_once_with(self.ctx.elevated(), cn) + new_numa = cn.numa_topology new_numa = objects.NUMATopology.obj_from_db_obj(new_numa) self.assertEqualNUMAHostTopology(expected_numa, new_numa) @@ -1509,7 +1533,7 @@ def test_resize_claim_same_host(self, get_mock, migr_mock, get_cn_mock, # not using mock.sentinel.ctx because resize_claim calls #elevated ctx = mock.MagicMock() - expected = self.rt.compute_node.obj_clone() + expected = self.rt.compute_nodes[_NODENAME].obj_clone() expected.vcpus_used = (expected.vcpus_used + new_flavor.vcpus) expected.memory_mb_used = (expected.memory_mb_used + @@ -1537,7 +1561,7 @@ def test_resize_claim_same_host(self, get_mock, migr_mock, get_cn_mock, None # move_type is None for resize... ) self.assertIsInstance(claim, claims.MoveClaim) - cn = self.rt.compute_node + cn = self.rt.compute_nodes[_NODENAME] self.assertTrue(obj_base.obj_equal_prims(expected, cn)) self.assertEqual(1, len(self.rt.tracked_migrations)) @@ -1583,7 +1607,7 @@ def _test_instance_build_resize(self, # Init compute node self.rt.update_available_resource(mock.sentinel.ctx, _NODENAME) - expected = self.rt.compute_node.obj_clone() + expected = self.rt.compute_nodes[_NODENAME].obj_clone() instance = _INSTANCE_FIXTURES[0].obj_clone() old_flavor = instance.flavor @@ -1591,13 +1615,15 @@ def _test_instance_build_resize(self, # Build instance with mock.patch.object(instance, 'save'): - self.rt.instance_claim(ctx, instance, None) + self.rt.instance_claim(ctx, instance, _NODENAME, None) expected = compute_update_usage(expected, old_flavor, sign=1) expected.running_vms = 1 - self.assertTrue(obj_base.obj_equal_prims(expected, - self.rt.compute_node, - ignore=['stats'])) + self.assertTrue(obj_base.obj_equal_prims( + expected, + self.rt.compute_nodes[_NODENAME], + ignore=['stats'] + )) # This migration context is fine, it points to the first instance # fixture and indicates a source-and-dest resize. 
@@ -1630,9 +1656,11 @@ def _test_instance_build_resize(self, self.rt.resize_claim(ctx, instance, new_flavor, _NODENAME) expected = compute_update_usage(expected, new_flavor, sign=1) - self.assertTrue(obj_base.obj_equal_prims(expected, - self.rt.compute_node, - ignore=['stats'])) + self.assertTrue(obj_base.obj_equal_prims( + expected, + self.rt.compute_nodes[_NODENAME], + ignore=['stats'] + )) # Confirm or revert resize if revert: flavor = new_flavor @@ -1641,12 +1669,15 @@ def _test_instance_build_resize(self, flavor = old_flavor prefix = 'old_' - self.rt.drop_move_claim(ctx, instance, flavor, prefix=prefix) + self.rt.drop_move_claim(ctx, instance, _NODENAME, flavor, + prefix=prefix) expected = compute_update_usage(expected, flavor, sign=-1) - self.assertTrue(obj_base.obj_equal_prims(expected, - self.rt.compute_node, - ignore=['stats'])) + self.assertTrue(obj_base.obj_equal_prims( + expected, + self.rt.compute_nodes[_NODENAME], + ignore=['stats'] + )) def test_instance_build_resize_confirm(self): self._test_instance_build_resize() @@ -1840,7 +1871,7 @@ def test_resize_claim_two_instances(self, get_mock, migr_mock, get_cn_mock, instance2.migration_context = mig_context_obj2 flavor2 = _INSTANCE_TYPE_OBJ_FIXTURES[1] - expected = self.rt.compute_node.obj_clone() + expected = self.rt.compute_nodes[_NODENAME].obj_clone() expected.vcpus_used = (expected.vcpus_used + flavor1.vcpus + flavor2.vcpus) @@ -1872,8 +1903,8 @@ def test_resize_claim_two_instances(self, get_mock, migr_mock, get_cn_mock, ) as (create_mig_mock, ctxt_mock, inst_save_mock): self.rt.resize_claim(ctx, instance1, flavor1, _NODENAME) self.rt.resize_claim(ctx, instance2, flavor2, _NODENAME) - self.assertTrue(obj_base.obj_equal_prims(expected, - self.rt.compute_node)) + cn = self.rt.compute_nodes[_NODENAME] + self.assertTrue(obj_base.obj_equal_prims(expected, cn)) self.assertEqual(2, len(self.rt.tracked_migrations), "Expected 2 tracked migrations but got %s" % self.rt.tracked_migrations) @@ -1969,8 +2000,7 @@ class TestUpdateUsageFromMigration(test.NoDBTestCase): @mock.patch('nova.compute.resource_tracker.ResourceTracker.' '_get_instance_type') def test_unsupported_move_type(self, get_mock): - rt = resource_tracker.ResourceTracker(mock.sentinel.ctx, - mock.sentinel.virt_driver, + rt = resource_tracker.ResourceTracker(mock.sentinel.virt_driver, _HOSTNAME) migration = objects.Migration(migration_type='live-migration') # For same-node migrations, the RT's _get_instance_type() method is @@ -2070,7 +2100,8 @@ class TestUpdateUsageFromInstance(BaseTestCase): def setUp(self): super(TestUpdateUsageFromInstance, self).setUp() self._setup_rt() - self.rt.compute_node = _COMPUTE_NODE_FIXTURES[0].obj_clone() + cn = _COMPUTE_NODE_FIXTURES[0].obj_clone() + self.rt.compute_nodes[_NODENAME] = cn self.instance = _INSTANCE_FIXTURES[0].obj_clone() @mock.patch('nova.compute.resource_tracker.ResourceTracker.' 
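The test changes above all follow one pattern: assertions that used to read the tracker's single compute_node attribute now index a per-node dict, and the disabled check takes the nodename it is being asked about. The snippet below is only an illustrative sketch of that bookkeeping, using a made-up MiniResourceTracker stand-in (it is not the real nova.compute.resource_tracker code, and the usage-accounting body is elided):

    class MiniResourceTracker(object):
        """One tracker per compute host, tracking many compute nodes."""

        def __init__(self, host, driver):
            self.host = host
            self.driver = driver
            # nodename -> ComputeNode-like record; replaces the old
            # single self.compute_node attribute.
            self.compute_nodes = {}
            self.old_resources = {}

        def disabled(self, nodename):
            # A node counts as disabled when there is no record for it or
            # the virt driver no longer reports it as available.
            return (nodename not in self.compute_nodes or
                    not self.driver.node_is_available(nodename))

        def update_usage(self, context, instance, nodename):
            if self.disabled(nodename):
                return
            cn = self.compute_nodes[nodename]
            # ...adjust cn.memory_mb_used, cn.local_gb_used, etc. here...

This is the shape the assertions above rely on: self.rt.compute_nodes[_NODENAME] instead of self.rt.compute_node, and self.rt.disabled(_NODENAME) returning True in test_claim_disabled once the dict has been emptied.
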
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py index 76aae083a3d..76c5d9f143d 100644 --- a/nova/tests/unit/compute/test_shelve.py +++ b/nova/tests/unit/compute/test_shelve.py @@ -250,7 +250,7 @@ def fake_delete(self2, ctxt, image_id): def fake_claim(context, instance, node, limits): instance.host = self.compute.host requests = objects.InstancePCIRequests(requests=[]) - return claims.Claim(context, instance, + return claims.Claim(context, instance, test_compute.NODENAME, self.rt, _fake_resources(), requests) @@ -365,8 +365,8 @@ def check_save(expected_task_state=None): self.compute.network_api.setup_instance_network_on_host( self.context, instance, self.compute.host) self.rt.instance_claim(self.context, instance, node, limits).AndReturn( - claims.Claim(self.context, instance, self.rt, - _fake_resources(), + claims.Claim(self.context, instance, test_compute.NODENAME, + self.rt, _fake_resources(), objects.InstancePCIRequests(requests=[]))) self.compute.driver.spawn(self.context, instance, mox.IsA(objects.ImageMeta),
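
The shelve tests above now hand the nodename to their fake claims positionally, matching the extra argument visible in the Claim calls in this hunk. Below is a hedged sketch of a claim-style test double in that spirit; FakeClaim is hypothetical (not nova.compute.claims.Claim), and it assumes the tracker exposes abort_instance_claim(context, instance, nodename) as the updated tracker does:

    class FakeClaim(object):
        """Hypothetical claim double for unit tests."""

        def __init__(self, context, instance, nodename, tracker, resources,
                     pci_requests, overhead=None, limits=None):
            self.context = context
            self.instance = instance
            # Stash the nodename at claim time rather than reading
            # instance.node later, which may not be set yet.
            self.nodename = nodename
            self.tracker = tracker

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                self.abort()

        def abort(self):
            # Return claimed resources against the node recorded above.
            self.tracker.abort_instance_claim(self.context, self.instance,
                                              self.nodename)

A test could then stub the tracker's instance_claim to return FakeClaim(ctx, instance, test_compute.NODENAME, self.rt, _fake_resources(), objects.InstancePCIRequests(requests=[])), much like fake_claim does in the hunk above.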