Merge "Consolidate inventory refresh"
Zuul authored and openstack-gerrit committed Jan 30, 2019
2 parents 9fab7e7 + 2f77e7a commit 16dda27
Showing 5 changed files with 133 additions and 146 deletions.
5 changes: 3 additions & 2 deletions nova/api/openstack/placement/handlers/reshaper.py
@@ -67,9 +67,10 @@ def reshape(req):
         generation = inventory_data['resource_provider_generation']
         if generation != resource_provider.generation:
             raise webob.exc.HTTPConflict(
-                _('resource provider generation conflict: '
+                _('resource provider generation conflict for provider %(rp)s: '
                   'actual: %(actual)s, given: %(given)s') %
-                {'actual': resource_provider.generation,
+                {'rp': rp_uuid,
+                 'actual': resource_provider.generation,
                  'given': generation},
                 comment=errors.CONCURRENT_UPDATE)

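Editor's note: the %(rp)s placeholder added above means the 409 response now names the provider whose generation was stale. A minimal sketch of how the new message renders (the UUID and generation values below are made up for illustration):

    # Illustration only -- provider UUID and generations are invented.
    rp_uuid = 'c3f7d1a2-0000-4000-8000-000000000001'
    params = {'rp': rp_uuid, 'actual': 5, 'given': 3}
    msg = ('resource provider generation conflict for provider %(rp)s: '
           'actual: %(actual)s, given: %(given)s') % params
    # msg == 'resource provider generation conflict for provider '
    #        'c3f7d1a2-0000-4000-8000-000000000001: actual: 5, given: 3'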
4 changes: 2 additions & 2 deletions nova/conf/compute.py
@@ -717,10 +717,10 @@
         # all about. Reference bug(s). Unless we're just going to remove it.
         help="""
 Interval for updating nova-compute-side cache of the compute node resource
-provider's aggregates and traits info.
+provider's inventories, aggregates, and traits.
 
 This option specifies the number of seconds between attempts to update a
-provider's aggregates and traits information in the local cache of the compute
+provider's inventories, aggregates and traits in the local cache of the compute
 node.
 
 A value of zero disables cache refresh completely.
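Editor's note: the option whose help text is updated here is CONF.compute.resource_provider_association_refresh. As a sketch of how it would appear in nova.conf (the 300-second value is illustrative, not asserted to be the default):

    [compute]
    # Refresh the compute node's local cache of provider inventories,
    # aggregates and traits every 300 seconds; 0 disables periodic refresh.
    resource_provider_association_refresh = 300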
30 changes: 12 additions & 18 deletions nova/scheduler/client/report.py
@@ -736,9 +736,6 @@ def _ensure_resource_provider(self, context, uuid, name=None,
         # At this point, the whole tree exists in the local cache.
 
         for uuid_to_refresh in uuids_to_refresh:
-            # NOTE(efried): _refresh_associations doesn't refresh inventory
-            # (yet) - see that method's docstring for the why.
-            self._refresh_and_get_inventory(context, uuid_to_refresh)
             self._refresh_associations(context, uuid_to_refresh, force=True)
 
         return uuid
@@ -816,18 +813,14 @@ def _refresh_associations(self, context, rp_uuid, force=False,
         this process, CONF.compute.resource_provider_association_refresh
         seconds have passed, or the force arg has been set to True.
 
-        Note that we do *not* refresh inventories. The reason is largely
-        historical: all code paths that get us here are doing inventory refresh
-        themselves.
-
         :param context: The security context
         :param rp_uuid: UUID of the resource provider to check for fresh
-            aggregates and traits
+            inventories, aggregates, and traits
         :param force: If True, force the refresh
         :param refresh_sharing: If True, fetch all the providers associated
                                 by aggregate with the specified provider,
-                                including their traits and aggregates (but not
-                                *their* sharing providers).
+                                including their inventories, traits, and
+                                aggregates (but not *their* sharing providers).
         :raise: On various placement API errors, one of:
                 - ResourceProviderAggregateRetrievalFailed
                 - ResourceProviderTraitRetrievalFailed
@@ -836,6 +829,10 @@ def _refresh_associations(self, context, rp_uuid, force=False,
                 communication fails.
         """
         if force or self._associations_stale(rp_uuid):
+            # Refresh inventories
+            msg = "Refreshing inventories for resource provider %s"
+            LOG.debug(msg, rp_uuid)
+            self._refresh_and_get_inventory(context, rp_uuid)
             # Refresh aggregates
             agg_info = self._get_provider_aggregates(context, rp_uuid)
             # If @safe_connect makes the above return None, this will raise
@@ -872,11 +869,11 @@ def _refresh_associations(self, context, rp_uuid, force=False,
                         self._provider_tree.new_root(
                             rp['name'], rp['uuid'],
                             generation=rp['generation'])
-                    # Now we have to (populate or) refresh that guy's traits
-                    # and aggregates (but not *his* aggregate-associated
-                    # providers). No need to override force=True for newly-
-                    # added providers - the missing timestamp will always
-                    # trigger them to refresh.
+                    # Now we have to (populate or) refresh that guy's traits,
+                    # aggregates, and inventories (but not *his* aggregate-
+                    # associated providers). No need to override force=True for
+                    # newly-added providers - the missing timestamp will
+                    # always trigger them to refresh.
                     self._refresh_associations(context, rp['uuid'],
                                                force=force,
                                                refresh_sharing=False)
@@ -1069,9 +1066,6 @@ def get_provider_tree_and_ensure_root(self, context, rp_uuid, name=None,
         self._ensure_resource_provider(
             context, rp_uuid, name=name,
             parent_provider_uuid=parent_provider_uuid)
-        # Ensure inventories are up to date (for *all* cached RPs)
-        for uuid in self._provider_tree.get_provider_uuids():
-            self._refresh_and_get_inventory(context, uuid)
         # Return a *copy* of the tree.
         return copy.deepcopy(self._provider_tree)

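Editor's note: taken together, the report.py hunks above make _refresh_associations the single place that refreshes a provider's cached inventories along with its aggregates and traits; callers such as _ensure_resource_provider and get_provider_tree_and_ensure_root no longer call _refresh_and_get_inventory themselves. A condensed sketch of the resulting flow (method names are taken from the diff, but the body is heavily simplified and the _get_sharing_providers call signature is assumed):

    # Condensed sketch only -- not the real method body.
    def _refresh_associations(self, context, rp_uuid, force=False,
                              refresh_sharing=True):
        if not (force or self._associations_stale(rp_uuid)):
            return
        # Inventory refresh is now consolidated here instead of in callers.
        self._refresh_and_get_inventory(context, rp_uuid)
        # Aggregates and traits are refreshed as before.
        agg_info = self._get_provider_aggregates(context, rp_uuid)
        # ... traits refresh and provider-tree bookkeeping elided ...
        if refresh_sharing:
            # Aggregate-sharing providers get the same treatment, but we do
            # not recurse into *their* sharing providers.
            for rp in self._get_sharing_providers(context, agg_info.aggregates):
                self._refresh_associations(context, rp['uuid'], force=force,
                                           refresh_sharing=False)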
31 changes: 27 additions & 4 deletions nova/tests/functional/test_report_client.py
@@ -1180,12 +1180,19 @@ def assertProviderTree(self, expected_dict, actual_tree):
             for k, expected in pdict.items():
                 # For inventories, we're only validating totals
                 if k is 'inventory':
-                    self.assertEqual(set(expected), set(actual_data.inventory))
+                    self.assertEqual(
+                        set(expected), set(actual_data.inventory),
+                        "Mismatched inventory keys for provider %s" % uuid)
                     for rc, totaldict in expected.items():
-                        self.assertEqual(totaldict['total'],
-                                         actual_data.inventory[rc]['total'])
+                        self.assertEqual(
+                            totaldict['total'],
+                            actual_data.inventory[rc]['total'],
+                            "Mismatched inventory totals for provider %s" %
+                            uuid)
                 else:
-                    self.assertEqual(expected, getattr(actual_data, k))
+                    self.assertEqual(expected, getattr(actual_data, k),
+                                     "Mismatched %s for provider %s" %
+                                     (k, uuid))
 
     def _set_up_provider_tree_allocs(self):
         """Create some allocations on our compute (with sharing).
@@ -1294,6 +1301,14 @@ def test_reshape(self):
         # in gabbits and via update_from_provider_tree.
         self._set_up_provider_tree()
         self._set_up_provider_tree_allocs()
+        # Updating allocations bumps generations for affected providers.
+        # In real life, the subsequent update_from_provider_tree will
+        # bounce 409, the cache will be cleared, and the operation will be
+        # retried. We don't care about any of that retry logic in the scope
+        # of this test case, so just clear the cache so
+        # get_provider_tree_and_ensure_root repopulates it and we avoid the
+        # conflict exception.
+        self.client.clear_provider_cache()
 
         ptree = self.client.get_provider_tree_and_ensure_root(
             self.context, self.compute_uuid)
@@ -1336,6 +1351,14 @@ def test_update_from_provider_tree_reshape(self):
         exp_allocs = self._set_up_provider_tree_allocs()
         # Save a copy of this for later
         orig_exp_allocs = copy.deepcopy(exp_allocs)
+        # Updating allocations bumps generations for affected providers.
+        # In real life, the subsequent update_from_provider_tree will
+        # bounce 409, the cache will be cleared, and the operation will be
+        # retried. We don't care about any of that retry logic in the scope
+        # of this test case, so just clear the cache so
+        # get_provider_tree_and_ensure_root repopulates it and we avoid the
+        # conflict exception.
+        self.client.clear_provider_cache()
         # Another null reshape: no inv changes, no alloc changes
         ptree = self.client.get_provider_tree_and_ensure_root(
             self.context, self.compute_uuid)
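Editor's note: as background for the improved assertion messages added to assertProviderTree above, the helper walks a dict keyed by provider UUID and compares each key against the cached provider data, checking only the 'total' of each inventory record. A hypothetical expected_dict entry (resource classes, totals, and the non-inventory keys below are invented for illustration; only the comparison logic comes from the diff):

    # Hypothetical example only -- values and non-inventory keys are invented.
    compute_uuid = 'cccccccc-0000-4000-8000-000000000000'
    expected_dict = {
        compute_uuid: {
            'inventory': {
                'VCPU': {'total': 8},         # only 'total' is compared
                'MEMORY_MB': {'total': 2048},
            },
            # Any other key is compared via getattr(actual_data, k).
            'traits': set(),
            'aggregates': set(),
        },
    }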
