From 5ae479c7e3065c3f932935a131d5c81712a4095a Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 9 Jun 2023 11:52:38 +0000 Subject: [PATCH 1/4] Delete the "Chassis_Private" register when deleting an agent The method ``delete_agent`` [1] now deletes both the OVN SB "Chassis" and "Chassis_Private" registers when called. [1]https://github.com/openstack/neutron/blob/ad81687e2955f494c28e2122763a2332afd3d26d/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py#L1400-L1401 Closes-Bug: #2023171 Change-Id: Ibb1285f61c172d2b6e281f41315415ce8b8526e7 (cherry picked from commit 9ca0e34a5edeb0419d86117dfaabb3f7842bb347) --- .../drivers/ovn/mech_driver/mech_driver.py | 9 ++++++++ .../ovn/mech_driver/test_mech_driver.py | 22 ++++++++++++++++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index 37118ec0894..578e004b761 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -40,6 +40,7 @@ from oslo_db import exception as os_db_exc from oslo_log import log from oslo_utils import timeutils +from ovsdbapp.backend.ovs_idl import idlutils from neutron._i18n import _ from neutron.common.ovn import acl as ovn_acl @@ -1322,6 +1323,14 @@ def delete_agent(self, context, id, _driver=None): chassis_name = agent['configurations']['chassis_name'] _driver.sb_ovn.chassis_del(chassis_name, if_exists=True).execute( check_error=True) + if _driver.sb_ovn.is_table_present('Chassis_Private'): + # TODO(ralonsoh): implement the corresponding chassis_private + # commands in ovsdbapp. + try: + _driver.sb_ovn.db_destroy('Chassis_Private', chassis_name).execute( + check_error=True) + except idlutils.RowNotFound: + pass # Send a specific event that all API workers can get to delete the agent # from their caches. Ideally we could send a single transaction that both # created and deleted the key, but alas python-ovs is too "smart" diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py index ee92f68dbdb..e1c63d7fce1 100644 --- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py +++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py @@ -1169,6 +1169,19 @@ def _create_test_agent(self): _, status = self.plugin.create_or_update_agent(self.context, agent) return status['id'] + def _check_chassis_registers(self, present=True): + chassis = self.sb_api.lookup('Chassis', self.chassis, default=None) + chassis_name = chassis.name if chassis else None + if self.sb_api.is_table_present('Chassis_Private'): + ch_private = self.sb_api.lookup( + 'Chassis_Private', self.chassis, default=None) + ch_private_name = ch_private.name if ch_private else None + self.assertEqual(chassis_name, ch_private_name) + if present: + self.assertEqual(self.chassis, chassis_name) + else: + self.assertIsNone(chassis) + def test_agent_show(self): for agent_id in self.agent_types.values(): self.assertTrue(self.plugin.get_agent(self.context, agent_id)) @@ -1217,12 +1230,15 @@ def test_agent_delete(self): self.assertRaises(agent_exc.AgentNotFound, self.plugin.get_agent, self.context, agent_id) - # OVN controller agent deletion, that triggers the "Chassis" register - # deletion. 
The "Chassis" register deletion triggers the host OVN - # agents deletion, both controller and metadata if present. + # OVN controller agent deletion, that triggers the "Chassis" and + # "Chassis_Private" registers deletion. The registers deletion triggers + # the host OVN agents deletion, both controller and metadata if + # present. controller_id = self.agent_types[ovn_const.OVN_CONTROLLER_AGENT] metadata_id = self.agent_types[ovn_const.OVN_METADATA_AGENT] + self._check_chassis_registers() self.plugin.delete_agent(self.context, controller_id) + self._check_chassis_registers(present=False) self.assertRaises(agent_exc.AgentNotFound, self.plugin.get_agent, self.context, controller_id) self.assertEqual( From cd4f345f60c29900f8c7ca64ae7a1f396d98d28d Mon Sep 17 00:00:00 2001 From: Arnau Verdaguer Date: Mon, 8 May 2023 16:51:41 +0200 Subject: [PATCH 2/4] [OVN][Trunk] Add port binding info on subport when parent is bound The host ID and VIF details are added on the subport when the trunk is created, if it's created when it's not attached to any VM this fields will remain empty and be filled on the parent port when it gets bound to a host, but there's no callback to add this info on the subport. Closes-Bug: #2018289 Closes-Bug: #2024160 Change-Id: I34bb6f178c314907bdf9f76789777f6736938b67 (cherry picked from commit 955e621167987626806128d259a29d4e62acf102) --- .../trunk/drivers/ovn/trunk_driver.py | 30 +++++++++-- .../trunk/drivers/ovn/test_trunk_driver.py | 50 +++++++++++++------ .../trunk/drivers/ovn/test_trunk_driver.py | 2 + 3 files changed, 62 insertions(+), 20 deletions(-) diff --git a/neutron/services/trunk/drivers/ovn/trunk_driver.py b/neutron/services/trunk/drivers/ovn/trunk_driver.py index 721cdc52924..41826a89613 100644 --- a/neutron/services/trunk/drivers/ovn/trunk_driver.py +++ b/neutron/services/trunk/drivers/ovn/trunk_driver.py @@ -49,11 +49,13 @@ def _set_sub_ports(self, parent_port, subports): context = n_context.get_admin_context() db_parent_port = port_obj.Port.get_object(context, id=parent_port) parent_port_status = db_parent_port.status + parent_port_bindings = db_parent_port.bindings[0] for subport in subports: with db_api.CONTEXT_WRITER.using(context), ( txn(check_error=True)) as ovn_txn: port = self._set_binding_profile(context, subport, parent_port, - parent_port_status, ovn_txn) + parent_port_status, + parent_port_bindings, ovn_txn) db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS) def _unset_sub_ports(self, subports): @@ -67,7 +69,8 @@ def _unset_sub_ports(self, subports): @db_base_plugin_common.convert_result_to_dict def _set_binding_profile(self, context, subport, parent_port, - parent_port_status, ovn_txn): + parent_port_status, + parent_port_bindings, ovn_txn): LOG.debug("Setting parent %s for subport %s", parent_port, subport.port_id) db_port = port_obj.Port.get_object(context, id=subport.port_id) @@ -79,6 +82,9 @@ def _set_binding_profile(self, context, subport, parent_port, check_rev_cmd = self.plugin_driver.nb_ovn.check_revision_number( db_port.id, db_port, ovn_const.TYPE_PORTS) ovn_txn.add(check_rev_cmd) + parent_binding_host = '' + if parent_port_bindings.host: + parent_binding_host = parent_port_bindings.host try: # NOTE(flaviof): We expect binding's host to be set. Otherwise, # sub-port will not transition from DOWN to ACTIVE. 
@@ -94,6 +100,7 @@ def _set_binding_profile(self, context, subport, parent_port, port_obj.PortBinding.update_object( context, {'profile': binding.profile, + 'host': parent_binding_host, 'vif_type': portbindings.VIF_TYPE_OVS}, port_id=subport.port_id, host=binding.host) @@ -155,6 +162,14 @@ def _unset_binding_profile(self, context, subport, ovn_txn): LOG.debug("Done unsetting parent for subport %s", subport.port_id) return db_port + def trunk_updated(self, trunk): + # Check if parent port is handled by OVN. + if not self.plugin_driver.nb_ovn.lookup('Logical_Switch_Port', + trunk.port_id, default=None): + return + if trunk.sub_ports: + self._set_sub_ports(trunk.port_id, trunk.sub_ports) + def trunk_created(self, trunk): # Check if parent port is handled by OVN. if not self.plugin_driver.nb_ovn.lookup('Logical_Switch_Port', @@ -189,6 +204,8 @@ def subports_deleted(self, trunk, subports): def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.trunk_created(payload.states[0]) + elif event == events.AFTER_UPDATE: + self.trunk_updated(payload.states[0]) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.states[0]) @@ -215,13 +232,16 @@ def register(self, resource, event, trigger, payload=None): super(OVNTrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = OVNTrunkHandler(self.plugin_driver) - for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE): + for _event in (events.AFTER_CREATE, events.AFTER_UPDATE, + events.AFTER_DELETE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, - trunk_event) + _event) + + for _event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.subport_event, resources.SUBPORTS, - trunk_event) + _event) @classmethod def create(cls, plugin_driver): diff --git a/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py b/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py index fed86fabbb4..1c19b38b282 100644 --- a/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py +++ b/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py @@ -14,21 +14,23 @@ import contextlib -from neutron.services.trunk import plugin as trunk_plugin -from neutron.tests.functional import base +from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_consts -from neutron_lib.objects import registry as obj_reg +from neutron_lib.db import api as db_api from neutron_lib.plugins import utils from neutron_lib.services.trunk import constants as trunk_consts from oslo_utils import uuidutils from neutron.common.ovn import constants as ovn_const +from neutron.objects import ports as port_obj +from neutron.services.trunk import plugin as trunk_plugin +from neutron.tests.functional import base class TestOVNTrunkDriver(base.TestOVNFunctionalBase): - def setUp(self): - super(TestOVNTrunkDriver, self).setUp() + def setUp(self, **kwargs): + super().setUp(**kwargs) self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type( trunk_consts.SEGMENTATION_TYPE_VLAN, @@ -39,7 +41,8 @@ def trunk(self, sub_ports=None): sub_ports = sub_ports or [] with self.network() as network: with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as parent_port: + with self.port(subnet=subnet, + device_owner='compute:nova') as parent_port: tenant_id = uuidutils.generate_uuid() trunk = {'trunk': { 'port_id': parent_port['port']['id'], @@ -64,17 +67,14 
@@ def _get_ovn_trunk_info(self): if row.parent_name and row.tag: device_owner = row.external_ids[ ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY] - revision_number = row.external_ids[ - ovn_const.OVN_REV_NUM_EXT_ID_KEY] ovn_trunk_info.append({'port_id': row.name, 'parent_port_id': row.parent_name, 'tag': row.tag, 'device_owner': device_owner, - 'revision_number': revision_number, }) return ovn_trunk_info - def _verify_trunk_info(self, trunk, has_items): + def _verify_trunk_info(self, trunk, has_items, host=''): ovn_subports_info = self._get_ovn_trunk_info() neutron_subports_info = [] for subport in trunk.get('sub_ports', []): @@ -83,12 +83,12 @@ def _verify_trunk_info(self, trunk, has_items): 'parent_port_id': [trunk['port_id']], 'tag': [subport['segmentation_id']], 'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER, - 'revision_number': '2', }) - # Check that the subport has the binding is active. - binding = obj_reg.load_class('PortBinding').get_object( - self.context, port_id=subport['port_id'], host='') - self.assertEqual(n_consts.PORT_STATUS_ACTIVE, binding['status']) + # Check the subport binding. + pb = port_obj.PortBinding.get_object( + self.context, port_id=subport['port_id'], host=host) + self.assertEqual(n_consts.PORT_STATUS_ACTIVE, pb.status) + self.assertEqual(host, pb.host) self.assertCountEqual(ovn_subports_info, neutron_subports_info) self.assertEqual(has_items, len(neutron_subports_info) != 0) @@ -96,6 +96,14 @@ def _verify_trunk_info(self, trunk, has_items): if trunk.get('status'): self.assertEqual(trunk_consts.TRUNK_ACTIVE_STATUS, trunk['status']) + def _bind_port(self, port_id, host): + with db_api.CONTEXT_WRITER.using(self.context): + pb = port_obj.PortBinding.get_object(self.context, + port_id=port_id, host='') + pb.delete() + port_obj.PortBinding(self.context, port_id=port_id, host=host, + vif_type=portbindings.VIF_TYPE_OVS).create() + def test_trunk_create(self): with self.trunk() as trunk: self._verify_trunk_info(trunk, has_items=False) @@ -113,10 +121,22 @@ def test_subport_add(self): new_trunk = self.trunk_plugin.get_trunk(self.context, trunk['id']) self._verify_trunk_info(new_trunk, has_items=True) + # Bind parent port. That will trigger the binding of the + # trunk subports too, using the same host ID. + self._bind_port(trunk['port_id'], 'host1') + self.mech_driver.set_port_status_up(trunk['port_id']) + self._verify_trunk_info(new_trunk, has_items=True, + host='host1') def test_subport_delete(self): with self.subport() as subport: with self.trunk([subport]) as trunk: + # Bind parent port. 
+ self._bind_port(trunk['port_id'], 'host1') + self.mech_driver.set_port_status_up(trunk['port_id']) + self._verify_trunk_info(trunk, has_items=True, + host='host1') + self.trunk_plugin.remove_subports(self.context, trunk['id'], {'sub_ports': [subport]}) new_trunk = self.trunk_plugin.get_trunk(self.context, diff --git a/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py b/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py index 894520bf860..5891c2ab2c4 100644 --- a/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py +++ b/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py @@ -122,6 +122,7 @@ def test_create_trunk(self): mock.call(mock.ANY, {'profile': {'parent_name': trunk.port_id, 'tag': s_port.segmentation_id}, + 'host': mock.ANY, 'vif_type': portbindings.VIF_TYPE_OVS}, host=mock.ANY, port_id=s_port.port_id) @@ -152,6 +153,7 @@ def test_create_trunk_port_db_exception(self): self.mock_update_pb.assert_called_once_with( mock.ANY, {'profile': {'parent_name': self.sub_port_1.trunk_id, 'tag': self.sub_port_1.segmentation_id}, + 'host': 'foo.com', 'vif_type': portbindings.VIF_TYPE_OVS}, host='foo.com', port_id=self.sub_port_1.port_id) self.mock_port_update.assert_not_called() From 4764ed550a47d591e272ff2275659a3aee78cdf5 Mon Sep 17 00:00:00 2001 From: elajkat Date: Thu, 2 Mar 2023 09:58:53 +0100 Subject: [PATCH 3/4] [qos] _validate_create_network_callback return in no network It seems that _validate_create_network_callback notified without network_id in payload, to avoid issues in such case return. Change-Id: I15d22e0e2b4bf3e4f882b521bcd6a13f84e4feed Closes-Bug: #2008912 (cherry picked from commit ec4bfb91f0d3ae4aa4cbe8ba05c20cb00515b00d) --- neutron/services/qos/qos_plugin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py index d01627f625b..ad798bc83e9 100644 --- a/neutron/services/qos/qos_plugin.py +++ b/neutron/services/qos/qos_plugin.py @@ -651,9 +651,9 @@ def _validate_create_network_callback(self, resource, event, trigger, network_id = payload.resource_id network = network_object.Network.get_object(context, id=network_id) - policy_id = network.qos_policy_id - if policy_id is None: + if not network or not getattr(network, 'qos_policy_id', None): return + policy_id = network.qos_policy_id policy = policy_object.QosPolicy.get_object( context.elevated(), id=policy_id) From 73ba302faf6f5007068c05b836a2e5654be908a7 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 13 Jun 2023 15:53:54 +0100 Subject: [PATCH 4/4] [OVN] Improve Hash Ring logs Debugging Hash Ring problems can be difficult challenge given that prior to this patch the logs were very limited. This patch improves the logging for this feature as follow: 1. Log when a node is added to the ring 2. Log when nodes are removed from the ring 3. Keep track the number of offline nodes and log it upon loading the ring 4. Improve the "Hash Ring is empty" exception with the number of offline nodes found (if 0, means the ovn_hash_ring table has no entries) Closes-Bug: #2023670 Change-Id: Ic90432b5ddea8cf176de159ec7eaafd5fd7bdd6e Signed-off-by: Lucas Alvares Gomes (cherry picked from commit afa20faec3c37bd06346360cadbad0d69e9925f0) [OVN] The all() and count() methods should be inside a DB txn The ``ovn_hash_ring_db`` methods ``get_active_nodes`` and ``count_offline_nodes`` are sending SQL requests that should be issued from inside a READER context. 
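
A minimal sketch of the pattern applied below, assuming the
``_get_nodes_query`` helper introduced in this patch; either form keeps query
construction and execution inside a single READER transaction:

    from neutron_lib.db import api as db_api

    @db_api.CONTEXT_READER
    def count_offline_nodes(context, interval, group_name):
        # Decorator form: the whole helper runs inside a reader transaction.
        return _get_nodes_query(context, interval, group_name,
                                offline=True).count()

    def count_offline_nodes_explicit(context, interval, group_name):
        # Equivalent explicit form using the context manager.
        with db_api.CONTEXT_READER.using(context):
            return _get_nodes_query(context, interval, group_name,
                                    offline=True).count()
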
Closes-Bug: #2024447 Change-Id: If06c372a9d5cb1dc1ec1af768abb61f52c2c5abd (cherry picked from commit 0c66dfaed8e1ec00726c3e484e69174779678abd) --- neutron/common/ovn/exceptions.py | 7 ++-- neutron/common/ovn/hash_ring_manager.py | 9 +++- neutron/db/ovn_hash_ring_db.py | 41 +++++++++++++++---- .../tests/unit/db/test_ovn_hash_ring_db.py | 27 ++++++++++++ 4 files changed, 72 insertions(+), 12 deletions(-) diff --git a/neutron/common/ovn/exceptions.py b/neutron/common/ovn/exceptions.py index 7dd30d2c135..b25adff6c47 100644 --- a/neutron/common/ovn/exceptions.py +++ b/neutron/common/ovn/exceptions.py @@ -33,6 +33,7 @@ class StandardAttributeIDNotFound(n_exc.NeutronException): class HashRingIsEmpty(n_exc.NeutronException): - message = _('Hash Ring returned empty when hashing "%(key)s". ' - 'This should never happen in a normal situation, please ' - 'check the status of your cluster') + message = _('Hash Ring returned empty when hashing "%(key)s". All ' + '%(node_count)d nodes were found offline. This should never ' + 'happen in a normal situation, please check the status ' + 'of your cluster') diff --git a/neutron/common/ovn/hash_ring_manager.py b/neutron/common/ovn/hash_ring_manager.py index 9c72269854b..59d6107f4ad 100644 --- a/neutron/common/ovn/hash_ring_manager.py +++ b/neutron/common/ovn/hash_ring_manager.py @@ -38,6 +38,7 @@ def __init__(self, group_name): # Flag to rate limit the caching log self._prev_num_nodes = -1 self.admin_ctx = context.get_admin_context() + self._offline_node_count = 0 @property def _wait_startup_before_caching(self): @@ -92,6 +93,11 @@ def _load_hash_ring(self, refresh=False): self._hash_ring = hashring.HashRing({node.node_uuid for node in nodes}) self._last_time_loaded = timeutils.utcnow() + self._offline_node_count = db_hash_ring.count_offline_nodes( + self.admin_ctx, constants.HASH_RING_NODES_TIMEOUT, + self._group) + LOG.debug("Hash Ring loaded. %d active nodes. 
%d offline nodes", + len(nodes), self._offline_node_count) def refresh(self): self._load_hash_ring(refresh=True) @@ -108,4 +114,5 @@ def get_node(self, key): # KeyError is raised return self._hash_ring[key].pop() except KeyError: - raise exceptions.HashRingIsEmpty(key=key) + raise exceptions.HashRingIsEmpty( + key=key, node_count=self._offline_node_count) diff --git a/neutron/db/ovn_hash_ring_db.py b/neutron/db/ovn_hash_ring_db.py index 1cea2f6bef2..fd9430d6919 100644 --- a/neutron/db/ovn_hash_ring_db.py +++ b/neutron/db/ovn_hash_ring_db.py @@ -17,12 +17,14 @@ from neutron_lib.db import api as db_api from oslo_config import cfg +from oslo_log import log from oslo_utils import timeutils from oslo_utils import uuidutils from neutron.db.models import ovn as ovn_models CONF = cfg.CONF +LOG = log.getLogger(__name__) # NOTE(ralonsoh): this was migrated from networking-ovn to neutron and should @@ -34,6 +36,8 @@ def add_node(context, group_name, node_uuid=None): with db_api.CONTEXT_WRITER.using(context): context.session.add(ovn_models.OVNHashRing( node_uuid=node_uuid, hostname=CONF.host, group_name=group_name)) + LOG.info('Node %s from host "%s" and group "%s" added to the Hash Ring', + node_uuid, CONF.host, group_name) return node_uuid @@ -42,6 +46,8 @@ def remove_nodes_from_host(context, group_name): context.session.query(ovn_models.OVNHashRing).filter( ovn_models.OVNHashRing.hostname == CONF.host, ovn_models.OVNHashRing.group_name == group_name).delete() + LOG.info('Nodes from host "%s" and group "%s" removed from the Hash Ring', + CONF.host, group_name) def _touch(context, **filter_args): @@ -58,12 +64,31 @@ def touch_node(context, node_uuid): _touch(context, node_uuid=node_uuid) -def get_active_nodes(context, interval, group_name, from_host=False): +def _get_nodes_query(context, interval, group_name, offline=False, + from_host=False): limit = timeutils.utcnow() - datetime.timedelta(seconds=interval) - with db_api.CONTEXT_READER.using(context): - query = context.session.query(ovn_models.OVNHashRing).filter( - ovn_models.OVNHashRing.updated_at >= limit, - ovn_models.OVNHashRing.group_name == group_name) - if from_host: - query = query.filter_by(hostname=CONF.host) - return query.all() + query = context.session.query(ovn_models.OVNHashRing).filter( + ovn_models.OVNHashRing.group_name == group_name) + + if offline: + query = query.filter(ovn_models.OVNHashRing.updated_at < limit) + else: + query = query.filter(ovn_models.OVNHashRing.updated_at >= limit) + + if from_host: + query = query.filter_by(hostname=CONF.host) + + return query + + +@db_api.CONTEXT_READER +def get_active_nodes(context, interval, group_name, from_host=False): + query = _get_nodes_query(context, interval, group_name, + from_host=from_host) + return query.all() + + +@db_api.CONTEXT_READER +def count_offline_nodes(context, interval, group_name): + query = _get_nodes_query(context, interval, group_name, offline=True) + return query.count() diff --git a/neutron/tests/unit/db/test_ovn_hash_ring_db.py b/neutron/tests/unit/db/test_ovn_hash_ring_db.py index e7205d33075..bedc50938a5 100644 --- a/neutron/tests/unit/db/test_ovn_hash_ring_db.py +++ b/neutron/tests/unit/db/test_ovn_hash_ring_db.py @@ -242,3 +242,30 @@ def test_touch_nodes_from_host_different_groups(self): for node in group2: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) + + def test_count_offline_nodes(self): + self._add_nodes_and_assert_exists(count=3) + + # Assert no nodes are considered offline + self.assertEqual(0, 
ovn_hash_ring_db.count_offline_nodes(
+            self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP))
+
+        # Subtract 60 seconds from utcnow() and touch the nodes to make
+        # them appear offline
+        fake_utcnow = timeutils.utcnow() - datetime.timedelta(seconds=60)
+        with mock.patch.object(timeutils, 'utcnow') as mock_utcnow:
+            mock_utcnow.return_value = fake_utcnow
+            ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx,
+                                                   HASH_RING_TEST_GROUP)
+
+        # Now assert that all nodes from our host are seen as offline
+        self.assertEqual(3, ovn_hash_ring_db.count_offline_nodes(
+            self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP))
+
+        # Touch the nodes again without faking utcnow()
+        ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx,
+                                               HASH_RING_TEST_GROUP)
+
+        # Assert no nodes are considered offline
+        self.assertEqual(0, ovn_hash_ring_db.count_offline_nodes(
+            self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP))
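
For reference, a hedged example of how the enriched ``HashRingIsEmpty``
message from the first hunk of this patch reads once every ring member is
offline; the key value is made up and the import alias is arbitrary:

    from neutron.common.ovn import exceptions as ovn_exc

    try:
        # get_node() raises this when hashing finds no online members; the
        # new node_count reports how many ring members were seen offline
        # (0 means the ovn_hash_ring table has no entries).
        raise ovn_exc.HashRingIsEmpty(key='fake-key', node_count=3)
    except ovn_exc.HashRingIsEmpty as exc:
        print(exc)
        # Hash Ring returned empty when hashing "fake-key". All 3 nodes were
        # found offline. This should never happen in a normal situation,
        # please check the status of your cluster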