From 2c7330ff2e4d621d769e469053bee3a00922be10 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 9 Aug 2019 17:17:45 -0400 Subject: [PATCH 1/3] rt: only map compute node if we created it If ComputeNode.create() fails, the update_available_resource periodic will not try to create it again because it will be mapped in the compute_nodes dict and _init_compute_node will return early but trying to save changes to that ComputeNode object later will fail because there is no id on the object, since we failed to create it in the DB. This simply reverses the logic such that we only map the compute node if we successfully created it. Some existing _init_compute_node testing had to be changed since it relied on the order of when the ComputeNode object is created and put into the compute_nodes dict in order to pass the object along to some much lower-level PCI tracker code, which was arguably mocking too deep for a unit test. That is changed to avoid the low-level mocking and assertions and just assert that _setup_pci_tracker is called as expected. Conflicts: nova/compute/resource_tracker.py nova/tests/unit/compute/test_resource_tracker.py NOTE(mriedem): The resource_tracker.py conflict is due to not having I14a310b20bd9892e7b34464e6baad49bf5928ece in Rocky. The test conflicts are due to not having change I37e8ad5b14262d801702411c2c87e73550adda70 in Rocky. Change-Id: I9fa1d509a3de405d6246fb8670612c65c10cc93b Closes-Bug: #1839674 (cherry picked from commit f578146f372386e1889561cba33e95495e66ce97) (cherry picked from commit 648770bd6897aa2ec95df3ec55344d5803543f07) (cherry picked from commit 35273a844ab2dc2494f0166d9b8228ee302acd4f) --- nova/compute/resource_tracker.py | 6 +- .../unit/compute/test_resource_tracker.py | 73 ++++++++++--------- 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index 988de9a0350..da7efe0c0ed 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -580,8 +580,12 @@ def _init_compute_node(self, context, resources): cn = objects.ComputeNode(context) cn.host = self.host self._copy_resources(cn, resources) - self.compute_nodes[nodename] = cn cn.create() + # Only map the ComputeNode into compute_nodes if create() was OK + # because if create() fails, on the next run through here nodename + # would be in compute_nodes and we won't try to create again (because + # of the logic above). + self.compute_nodes[nodename] = cn LOG.info('Compute node record created for ' '%(host)s:%(node)s with uuid: %(uuid)s', {'host': self.host, 'node': nodename, 'uuid': cn.uuid}) diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index 1d31fec2e9a..2ab2aac7a39 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -1074,23 +1074,18 @@ def fake_get_all(_ctx, nodename): self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host) @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor') - @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', - return_value=objects.PciDeviceList(objects=[])) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.compute.resource_tracker.ResourceTracker.' 
'_update') def test_compute_node_created_on_empty(self, update_mock, get_mock, - create_mock, pci_tracker_mock, + create_mock, get_by_hypervisor_mock): get_by_hypervisor_mock.return_value = [] self._test_compute_node_created(update_mock, get_mock, create_mock, - pci_tracker_mock, get_by_hypervisor_mock) @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor') - @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', - return_value=objects.PciDeviceList(objects=[])) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.compute.resource_tracker.ResourceTracker.' @@ -1098,32 +1093,26 @@ def test_compute_node_created_on_empty(self, update_mock, get_mock, def test_compute_node_created_on_empty_rebalance(self, update_mock, get_mock, create_mock, - pci_tracker_mock, get_by_hypervisor_mock): get_by_hypervisor_mock.return_value = [] self._test_compute_node_created(update_mock, get_mock, create_mock, - pci_tracker_mock, get_by_hypervisor_mock, rebalances_nodes=True) @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor') - @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', - return_value=objects.PciDeviceList(objects=[])) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.compute.resource_tracker.ResourceTracker.' '_update') def test_compute_node_created_too_many(self, update_mock, get_mock, - create_mock, pci_tracker_mock, + create_mock, get_by_hypervisor_mock): get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"] self._test_compute_node_created(update_mock, get_mock, create_mock, - pci_tracker_mock, get_by_hypervisor_mock, rebalances_nodes=True) - def _test_compute_node_created(self, update_mock, get_mock, - create_mock, pci_tracker_mock, + def _test_compute_node_created(self, update_mock, get_mock, create_mock, get_by_hypervisor_mock, rebalances_nodes=False): self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0, @@ -1152,12 +1141,12 @@ def _test_compute_node_created(self, update_mock, get_mock, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0, - 'pci_passthrough_devices': '[]' + 'pci_passthrough_devices': '[]', + 'uuid': uuids.compute_node_uuid } # The expected compute represents the initial values used # when creating a compute node. 
expected_compute = objects.ComputeNode( - id=42, host_ip=resources['host_ip'], vcpus=resources['vcpus'], memory_mb=resources['memory_mb'], @@ -1177,20 +1166,11 @@ def _test_compute_node_created(self, update_mock, get_mock, cpu_allocation_ratio=1.0, disk_allocation_ratio=1.0, stats={'failed_builds': 0}, - pci_device_pools=objects.PciDevicePoolList(objects=[]), uuid=uuids.compute_node_uuid ) - def set_cn_id(): - # The PCI tracker needs the compute node's ID when starting up, so - # make sure that we set the ID value so we don't get a Cannot load - # 'id' in base class error - cn = self.rt.compute_nodes[_NODENAME] - cn.id = 42 # Has to be a number, not a mock - cn.uuid = uuids.compute_node_uuid - - create_mock.side_effect = set_cn_id - self.rt._init_compute_node(mock.sentinel.ctx, resources) + with mock.patch.object(self.rt, '_setup_pci_tracker') as setup_pci: + self.rt._init_compute_node(mock.sentinel.ctx, resources) cn = self.rt.compute_nodes[_NODENAME] get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME, @@ -1202,22 +1182,47 @@ def set_cn_id(): get_by_hypervisor_mock.assert_not_called() create_mock.assert_called_once_with() self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn)) - pci_tracker_mock.assert_called_once_with(mock.sentinel.ctx, - 42) + setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources) self.assertFalse(update_mock.called) + @mock.patch('nova.compute.resource_tracker.ResourceTracker.' + '_setup_pci_tracker') + @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename', + side_effect=exc.ComputeHostNotFound(host=_HOSTNAME)) + @mock.patch('nova.objects.ComputeNode.create', + side_effect=(test.TestingException, None)) + def test_compute_node_create_fail_retry_works(self, mock_create, mock_get, + mock_setup_pci): + """Tests that _init_compute_node will not save the ComputeNode object + in the compute_nodes dict if create() fails. + """ + self._setup_rt() + self.assertEqual({}, self.rt.compute_nodes) + ctxt = context.get_context() + # The first ComputeNode.create fails so rt.compute_nodes should + # remain empty. + resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) + resources['uuid'] = uuids.cn_uuid # for the LOG.info message + self.assertRaises(test.TestingException, + self.rt._init_compute_node, ctxt, resources) + self.assertEqual({}, self.rt.compute_nodes) + # Second create works so compute_nodes should have a mapping. + self.rt._init_compute_node(ctxt, resources) + self.assertIn(_NODENAME, self.rt.compute_nodes) + mock_get.assert_has_calls([mock.call( + ctxt, _HOSTNAME, _NODENAME)] * 2) + self.assertEqual(2, mock_create.call_count) + mock_setup_pci.assert_called_once_with( + ctxt, test.MatchType(objects.ComputeNode), resources) + @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor') - @mock.patch('nova.objects.PciDeviceList.get_by_compute_node', - return_value=objects.PciDeviceList(objects=[])) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.compute.resource_tracker.ResourceTracker.' 
'_update') def test_node_removed(self, update_mock, get_mock, - create_mock, pci_tracker_mock, - get_by_hypervisor_mock): + create_mock, get_by_hypervisor_mock): self._test_compute_node_created(update_mock, get_mock, create_mock, - pci_tracker_mock, get_by_hypervisor_mock) self.rt.old_resources[_NODENAME] = mock.sentinel.foo self.assertIn(_NODENAME, self.rt.compute_nodes) From 62aac69eec9c3c211bef60bb68a54ba44e383e89 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 9 Aug 2019 17:24:07 -0400 Subject: [PATCH 2/3] Add functional regression recreate test for bug 1839560 This adds a functional test which recreates bug 1839560 where the driver reports a node, then no longer reports it so the compute manager deletes it, and then the driver reports it again later (this can be common with ironic nodes as they undergo maintenance). The issue is that since Ia69fabce8e7fd7de101e291fe133c6f5f5f7056a in Rocky, the ironic node uuid is re-used for the compute node uuid but there is a unique constraint on the compute node uuid so when trying to create the compute node once the ironic node is available again, the compute node create fails with a duplicate entry error due to the duplicate uuid. To recreate this in the functional test, a new fake virt driver is added which provides a predictable uuid per node like the ironic driver. The test also shows that archiving the database is a way to workaround the bug until it's properly fixed. NOTE(mriedem): Since change Idaed39629095f86d24a54334c699a26c218c6593 is not in Rocky the PlacementFixture still comes from nova_fixtures. Change-Id: If822509e906d5094f13a8700b2b9ed3c40580431 Related-Bug: #1839560 (cherry picked from commit 89dd74ac7f1028daadf86cb18948e27fe9d1d411) (cherry picked from commit e7109d43d6d6db0f9db18976585f3334d2be72bf) (cherry picked from commit ecd1e046214e087dd484359f256386a3e8962ec1) --- .../regressions/test_bug_1839560.py | 119 ++++++++++++++++++ nova/virt/fake.py | 14 +++ 2 files changed, 133 insertions(+) create mode 100644 nova/tests/functional/regressions/test_bug_1839560.py diff --git a/nova/tests/functional/regressions/test_bug_1839560.py b/nova/tests/functional/regressions/test_bug_1839560.py new file mode 100644 index 00000000000..33ea655c6b4 --- /dev/null +++ b/nova/tests/functional/regressions/test_bug_1839560.py @@ -0,0 +1,119 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from nova import context +from nova.db import api as db_api +from nova import exception +from nova import objects +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova import utils +from nova.virt import fake as fake_virt + +LOG = logging.getLogger(__name__) + + +class PeriodicNodeRecreateTestCase(test.TestCase, + integrated_helpers.InstanceHelperMixin): + """Regression test for bug 1839560 introduced in Rocky. 
+ + When an ironic node is undergoing maintenance the driver will not report + it as an available node to the ComputeManager.update_available_resource + periodic task. The ComputeManager will then (soft) delete a ComputeNode + record for that no-longer-available node. If/when the ironic node is + available again and the driver reports it, the ResourceTracker will attempt + to create a ComputeNode record for the ironic node. + + The regression with change Ia69fabce8e7fd7de101e291fe133c6f5f5f7056a is + that the ironic node uuid is used as the ComputeNode.uuid and there is + a unique constraint on the ComputeNode.uuid value in the database. So + trying to create a ComputeNode with the same uuid (after the ironic node + comes back from being unavailable) fails with a DuplicateEntry error since + there is a (soft) deleted version of the ComputeNode with the same uuid + in the database. + """ + def setUp(self): + super(PeriodicNodeRecreateTestCase, self).setUp() + # We need the PlacementFixture for the compute nodes to report in but + # otherwise don't care about placement for this test. + self.useFixture(nova_fixtures.PlacementFixture()) + # Start up the API so we can query the os-hypervisors API. + self.api = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')).admin_api + # Make sure we're using the fake driver that has predictable uuids + # for each node. + self.flags(compute_driver='fake.PredictableNodeUUIDDriver') + + def test_update_available_resource_node_recreate(self): + # First we create a compute service to manage a couple of fake nodes. + # When start_service runs, it will create the node1 and node2 + # ComputeNodes. + fake_virt.set_nodes(['node1', 'node2']) + self.addCleanup(fake_virt.restore_nodes) + compute = self.start_service('compute', 'node1') + # Now we should have two compute nodes, make sure the hypervisors API + # shows them. + hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] + self.assertEqual(2, len(hypervisors), hypervisors) + self.assertEqual({'node1', 'node2'}, + set([hyp['hypervisor_hostname'] + for hyp in hypervisors])) + # Now stub the driver to only report node1. This is making it look like + # node2 is no longer available when update_available_resource runs. + compute.manager.driver._nodes = ['node1'] + ctxt = context.get_admin_context() + compute.manager.update_available_resource(ctxt) + # node2 should have been deleted, check the logs and API. + log = self.stdlog.logger.output + self.assertIn('Deleting orphan compute node', log) + self.assertIn('hypervisor host is node2', log) + hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] + self.assertEqual(1, len(hypervisors), hypervisors) + self.assertEqual('node1', hypervisors[0]['hypervisor_hostname']) + # But the node2 ComputeNode is still in the database with deleted!=0. + with utils.temporary_mutation(ctxt, read_deleted='yes'): + cn = objects.ComputeNode.get_by_host_and_nodename( + ctxt, 'node1', 'node2') + self.assertTrue(cn.deleted) + # Now stub the driver again to report node2 as being back and run + # the periodic task. + compute.manager.driver._nodes = ['node1', 'node2'] + compute.manager.update_available_resource(ctxt) + # FIXME(mriedem): This is bug 1839560 where the ResourceTracker fails + # to create a ComputeNode for node2 because of conflicting UUIDs. + log = self.stdlog.logger.output + self.assertIn('Error updating resources for node node2', log) + self.assertIn('DBDuplicateEntry', log) + # Should still only have one reported hypervisor (node1). 
+ hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] + self.assertEqual(1, len(hypervisors), hypervisors) + # Test the workaround for bug 1839560 by archiving the deleted node2 + # compute_nodes table record which will allow the periodic to create a + # new entry for node2. We can remove this when the bug is fixed. + LOG.info('Archiving the database.') + archived = db_api.archive_deleted_rows(1000)[0] + self.assertIn('compute_nodes', archived) + self.assertEqual(1, archived['compute_nodes']) + with utils.temporary_mutation(ctxt, read_deleted='yes'): + self.assertRaises(exception.ComputeHostNotFound, + objects.ComputeNode.get_by_host_and_nodename, + ctxt, 'node1', 'node2') + # Now run the periodic again and we should have a new ComputeNode for + # node2. + LOG.info('Running update_available_resource which should create a new ' + 'ComputeNode record for node2.') + compute.manager.update_available_resource(ctxt) + hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] + self.assertEqual(2, len(hypervisors), hypervisors) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 300c2510e97..1271a7463e8 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -27,6 +27,7 @@ import contextlib import copy import time +import uuid from oslo_log import log as logging from oslo_serialization import jsonutils @@ -674,6 +675,19 @@ class MediumFakeDriver(FakeDriver): local_gb = 1028 +class PredictableNodeUUIDDriver(SmallFakeDriver): + """SmallFakeDriver variant that reports a predictable node uuid in + get_available_resource, like IronicDriver. + """ + def get_available_resource(self, nodename): + resources = super( + PredictableNodeUUIDDriver, self).get_available_resource(nodename) + # This is used in ComputeNode.update_from_virt_driver which is called + # from the ResourceTracker when creating a ComputeNode. + resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename) + return resources + + class FakeRescheduleDriver(FakeDriver): """FakeDriver derivative that triggers a reschedule on the first spawn attempt. This is expected to only be used in tests that have more than From 0fbfc2665bf319f2211592312c3a17521d0d954a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 12 Aug 2019 14:39:16 -0400 Subject: [PATCH 3/3] Restore soft-deleted compute node with same uuid There is a unique index on the compute_nodes.uuid column which means we can't have more than one compute_nodes record in the same DB with the same UUID even if one is soft deleted because the deleted column is not part of that unique index constraint. This is a problem with ironic nodes where the node is 1:1 with the compute node record, and when a node is undergoing maintenance the driver doesn't return it from get_available_nodes() so the ComputeManager.update_available_resource periodic task (soft) deletes the compute node record, but when the node is no longer under maintenance in ironic and the driver reports it, the ResourceTracker._init_compute_node code will fail to create the ComputeNode record again because of the duplicate uuid. This change handles the DBDuplicateEntry error in compute_node_create by finding the soft-deleted compute node with the same uuid and simply updating it to no longer be (soft) deleted. 
Closes-Bug: #1839560 Change-Id: Iafba419fe86446ffe636721f523fb619f8f787b3 (cherry picked from commit 8b007266f438ec0a5a797d05731cce6f2b155f4c) (cherry picked from commit 1b021665281b74c865d3571fc90772b52d70e467) (cherry picked from commit 9ce94844fa6a43368445182bb086876874256197) --- nova/db/sqlalchemy/api.py | 46 ++++++++++++++++++- .../regressions/test_bug_1839560.py | 40 +++++++--------- nova/tests/unit/db/test_db_api.py | 12 +++++ 3 files changed, 74 insertions(+), 24 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 37d1beed29a..9cbc2c974fe 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -30,6 +30,7 @@ from oslo_db.sqlalchemy import update_match from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_log import log as logging +from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils @@ -693,11 +694,54 @@ def compute_node_create(context, values): compute_node_ref = models.ComputeNode() compute_node_ref.update(values) - compute_node_ref.save(context.session) + try: + compute_node_ref.save(context.session) + except db_exc.DBDuplicateEntry: + with excutils.save_and_reraise_exception(logger=LOG) as err_ctx: + # Check to see if we have a (soft) deleted ComputeNode with the + # same UUID and if so just update it and mark as no longer (soft) + # deleted. See bug 1839560 for details. + if 'uuid' in values: + # Get a fresh context for a new DB session and allow it to + # get a deleted record. + ctxt = nova.context.get_admin_context(read_deleted='yes') + compute_node_ref = _compute_node_get_and_update_deleted( + ctxt, values) + # If we didn't get anything back we failed to find the node + # by uuid and update it so re-raise the DBDuplicateEntry. + if compute_node_ref: + err_ctx.reraise = False return compute_node_ref +@pick_context_manager_writer +def _compute_node_get_and_update_deleted(context, values): + """Find a ComputeNode by uuid, update and un-delete it. + + This is a special case from the ``compute_node_create`` method which + needs to be separate to get a new Session. + + This method will update the ComputeNode, if found, to have deleted=0 and + deleted_at=None values. + + :param context: request auth context which should be able to read deleted + records + :param values: values used to update the ComputeNode record - must include + uuid + :return: updated ComputeNode sqlalchemy model object if successfully found + and updated, None otherwise + """ + cn = model_query( + context, models.ComputeNode).filter_by(uuid=values['uuid']).first() + if cn: + # Update with the provided values but un-soft-delete. 
+ update_values = copy.deepcopy(values) + update_values['deleted'] = 0 + update_values['deleted_at'] = None + return compute_node_update(context, cn.id, update_values) + + @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def compute_node_update(context, compute_id, values): diff --git a/nova/tests/functional/regressions/test_bug_1839560.py b/nova/tests/functional/regressions/test_bug_1839560.py index 33ea655c6b4..02786c11f83 100644 --- a/nova/tests/functional/regressions/test_bug_1839560.py +++ b/nova/tests/functional/regressions/test_bug_1839560.py @@ -14,7 +14,6 @@ from nova import context from nova.db import api as db_api -from nova import exception from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures @@ -90,30 +89,25 @@ def test_update_available_resource_node_recreate(self): # Now stub the driver again to report node2 as being back and run # the periodic task. compute.manager.driver._nodes = ['node1', 'node2'] + LOG.info('Running update_available_resource which should bring back ' + 'node2.') compute.manager.update_available_resource(ctxt) - # FIXME(mriedem): This is bug 1839560 where the ResourceTracker fails - # to create a ComputeNode for node2 because of conflicting UUIDs. + # The DBDuplicateEntry error should have been handled and resulted in + # updating the (soft) deleted record to no longer be deleted. log = self.stdlog.logger.output - self.assertIn('Error updating resources for node node2', log) - self.assertIn('DBDuplicateEntry', log) - # Should still only have one reported hypervisor (node1). + self.assertNotIn('DBDuplicateEntry', log) + # Should have two reported hypervisors again. hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] - self.assertEqual(1, len(hypervisors), hypervisors) - # Test the workaround for bug 1839560 by archiving the deleted node2 - # compute_nodes table record which will allow the periodic to create a - # new entry for node2. We can remove this when the bug is fixed. + self.assertEqual(2, len(hypervisors), hypervisors) + # Now that the node2 record was un-soft-deleted, archiving should not + # remove any compute_nodes. LOG.info('Archiving the database.') archived = db_api.archive_deleted_rows(1000)[0] - self.assertIn('compute_nodes', archived) - self.assertEqual(1, archived['compute_nodes']) - with utils.temporary_mutation(ctxt, read_deleted='yes'): - self.assertRaises(exception.ComputeHostNotFound, - objects.ComputeNode.get_by_host_and_nodename, - ctxt, 'node1', 'node2') - # Now run the periodic again and we should have a new ComputeNode for - # node2. - LOG.info('Running update_available_resource which should create a new ' - 'ComputeNode record for node2.') - compute.manager.update_available_resource(ctxt) - hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors'] - self.assertEqual(2, len(hypervisors), hypervisors) + self.assertNotIn('compute_nodes', archived) + cn2 = objects.ComputeNode.get_by_host_and_nodename( + ctxt, 'node1', 'node2') + self.assertFalse(cn2.deleted) + self.assertIsNone(cn2.deleted_at) + # The node2 id and uuid should not have changed in the DB. 
+ self.assertEqual(cn.id, cn2.id) + self.assertEqual(cn.uuid, cn2.uuid) diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py index 0433f126b1b..5b269140429 100644 --- a/nova/tests/unit/db/test_db_api.py +++ b/nova/tests/unit/db/test_db_api.py @@ -6882,6 +6882,18 @@ def test_compute_node_create(self): new_stats = jsonutils.loads(self.item['stats']) self.assertEqual(self.stats, new_stats) + def test_compute_node_create_duplicate_host_hypervisor_hostname(self): + """Tests to make sure that DBDuplicateEntry is raised when trying to + create a duplicate ComputeNode with the same host and + hypervisor_hostname values but different uuid values. This makes + sure that when _compute_node_get_and_update_deleted returns None + the DBDuplicateEntry is re-raised. + """ + other_node = dict(self.compute_node_dict) + other_node['uuid'] = uuidutils.generate_uuid() + self.assertRaises(db_exc.DBDuplicateEntry, + db.compute_node_create, self.ctxt, other_node) + def test_compute_node_get_all(self): nodes = db.compute_node_get_all(self.ctxt) self.assertEqual(1, len(nodes))
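
Editor's note: the following two sketches are not part of the patches above; they are standalone illustrations of the mechanisms the commit messages describe, with all class, table, and variable names invented for the example.

The first sketch shows the ordering fix from patch 1 in isolation: the node is recorded in the tracker's local cache only after the DB create succeeds, so a failed create is retried on the next periodic run instead of being skipped because the early-return check already sees the node mapped. This is a minimal stand-in, assuming a toy `FakeTracker` and a flaky create callable, not the real ResourceTracker.

```python
# A minimal standalone sketch (not Nova code) of the ordering fix in patch 1:
# map the node into the local cache only after create() succeeds, so a failed
# create is retried on the next periodic run instead of being skipped forever.
class FakeTracker(object):
    def __init__(self, db_create):
        self.compute_nodes = {}
        self._db_create = db_create  # stand-in for ComputeNode.create()

    def init_compute_node(self, nodename):
        if nodename in self.compute_nodes:
            return  # already created and mapped; nothing to do
        record = {'node': nodename}
        self._db_create(record)                # may raise
        self.compute_nodes[nodename] = record  # map only if create() was OK


# First create() fails, so nothing is cached and the next run retries it.
calls = {'n': 0}


def flaky_create(record):
    calls['n'] += 1
    if calls['n'] == 1:
        raise RuntimeError('DB insert failed')


rt = FakeTracker(flaky_create)
try:
    rt.init_compute_node('node1')
except RuntimeError:
    pass
assert 'node1' not in rt.compute_nodes   # not mapped on failure
rt.init_compute_node('node1')            # second "periodic run" succeeds
assert 'node1' in rt.compute_nodes
```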
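The second sketch illustrates the recovery pattern from patch 3: when an insert hits the unique constraint on the node uuid, look for a soft-deleted row with the same uuid and un-soft-delete it, re-raising the duplicate error only if no such row exists. It uses an in-memory sqlite database as a stand-in for the real MySQL/oslo.db stack, and the table schema, column set, and `'now'` timestamp are illustrative only.

```python
# A self-contained sketch (not Nova code) of the DBDuplicateEntry handling in
# patch 3, using sqlite in place of the real oslo.db/MySQL stack.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("""
    CREATE TABLE compute_nodes (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        uuid TEXT NOT NULL UNIQUE,
        host TEXT NOT NULL,
        deleted INTEGER NOT NULL DEFAULT 0,
        deleted_at TEXT
    )
""")


def compute_node_create(conn, uuid_, host):
    """Insert a node; on a duplicate uuid, revive the soft-deleted row."""
    try:
        conn.execute(
            'INSERT INTO compute_nodes (uuid, host) VALUES (?, ?)',
            (uuid_, host))
    except sqlite3.IntegrityError:
        # Analogue of _compute_node_get_and_update_deleted: only swallow the
        # error if a soft-deleted row with this uuid exists and was revived.
        cur = conn.execute(
            'UPDATE compute_nodes SET deleted = 0, deleted_at = NULL, '
            'host = ? WHERE uuid = ? AND deleted != 0', (host, uuid_))
        if cur.rowcount == 0:
            raise  # a live duplicate; re-raise, like leaving err_ctx.reraise
    conn.commit()


compute_node_create(conn, 'node2-uuid', 'host1')
# Soft delete, mimicking what update_available_resource does for a node the
# driver stops reporting (Nova sets deleted to the row id, not just 1).
conn.execute("UPDATE compute_nodes SET deleted = id, deleted_at = 'now' "
             "WHERE uuid = 'node2-uuid'")
compute_node_create(conn, 'node2-uuid', 'host1')  # revives the same row
print(conn.execute(
    'SELECT id, uuid, deleted, deleted_at FROM compute_nodes').fetchall())
# -> [(1, 'node2-uuid', 0, None)]
```

As in the functional test above, the revived row keeps its original id and uuid, which is why archiving afterwards has nothing to remove.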