From 4b1e0bc5e4e94560ff91e76df908c32b574dd565 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 26 Jun 2023 21:17:48 +0530 Subject: [PATCH 1/2] [DHCP agent] Add route to OVN metadata port if exists When DHCP agent is deployed with ml2/ovn for baremetal ports, ovn metadata route is not added. This patch adds route via ovn metadata port if exists so baremetal nodes can fetch metadata. Closes-Bug: #1982569 Related-Bug: https://bugzilla.redhat.com/show_bug.cgi?id=2213862 Change-Id: I12e496d70bb6db707b317d0aeb6e4edd6c43571e (cherry picked from commit 82f2a21d1c9e27999d3fd7006a7ecf961039a370) --- neutron/agent/linux/dhcp.py | 64 ++++++++++++++----- neutron/tests/unit/agent/dhcp/test_agent.py | 43 ++++++++++++- neutron/tests/unit/agent/linux/test_dhcp.py | 31 +++++++++ ...nt-ovn-metadata-port-33a654ccb9554c65.yaml | 9 +++ 4 files changed, 128 insertions(+), 19 deletions(-) create mode 100644 releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index b6e24e3c3cc..5f6c9c1e508 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -1140,6 +1140,14 @@ def _output_opts_file(self): file_utils.replace_file(name, '\n'.join(options)) return name + def _get_ovn_metadata_port_ip(self, subnet): + m_ports = [port for port in self.network.ports if + self._is_ovn_metadata_port(port, self.network.id)] + if m_ports: + for fixed_ip in m_ports[0].fixed_ips: + if fixed_ip.subnet_id == subnet.id: + return fixed_ip.ip_address + def _generate_opts_per_subnet(self): options = [] subnets_without_nameservers = set() @@ -1193,23 +1201,33 @@ def _generate_opts_per_subnet(self): else: host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) - # Add host routes for isolated network segments - - if ((self.conf.force_metadata or - (isolated_subnets[subnet.id] and - self.conf.enable_isolated_metadata)) and - subnet.ip_version == 4): - subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) - 
if subnet_dhcp_ip: + # Determine metadata port route + if subnet.ip_version == constants.IP_VERSION_4: + metadata_route_ip = None + # NOTE: OVN metadata port IP is used in a case when the DHCP + agent is deployed in the ML2/OVN environment where the native + ovn-controller dhcp is disabled. The ovn metadata route + takes precedence over native force_metadata and + enable_isolated_metadata routes settings. + ovn_metadata_port_ip = self._get_ovn_metadata_port_ip(subnet) + if ovn_metadata_port_ip: + metadata_route_ip = ovn_metadata_port_ip + + elif (self.conf.force_metadata or + (isolated_subnets[subnet.id] and + self.conf.enable_isolated_metadata)): + subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) + if subnet_dhcp_ip: + metadata_route_ip = subnet_dhcp_ip + + if not isolated_subnets[subnet.id] and gateway: + metadata_route_ip = gateway + + if metadata_route_ip: host_routes.append( - '%s,%s' % (constants.METADATA_CIDR, subnet_dhcp_ip) + '%s,%s' % (constants.METADATA_CIDR, metadata_route_ip) ) - elif not isolated_subnets[subnet.id] and gateway: - host_routes.append( - '%s,%s' % (constants.METADATA_CIDR, gateway) - ) - if subnet.ip_version == 4: for s in self._get_all_subnets(self.network): sub_segment_id = getattr(s, 'segment_id', None) if (s.ip_version == 4 and @@ -1374,13 +1392,21 @@ def has_metadata_subnet(subnets): return True return False + @staticmethod + def _is_ovn_metadata_port(port, network_id): + return (port.device_id == 'ovnmeta-' + network_id and + port.device_owner == constants.DEVICE_OWNER_DISTRIBUTED) + @classmethod def should_enable_metadata(cls, conf, network): """Determine whether the metadata proxy is needed for a network - This method returns True for truly isolated networks (ie: not attached - to a router) when enable_isolated_metadata is True, or for all the - networks when the force_metadata flags is True. 
+ If the given network contains an ovn metadata port then this method + assumes that the ovn metadata service is in use and this metadata + service is not required, method returns False. For other cases this + method returns True for truly isolated networks (ie: not attached to a + router) when enable_isolated_metadata is True, or for all the networks + when the force_metadata flag is True. This method also returns True when enable_metadata_network is True, and the network passed as a parameter has a subnet in the link-local @@ -1389,6 +1415,10 @@ def should_enable_metadata(cls, conf, network): providing access to the metadata service via logical routers built with 3rd party backends. """ + for port in network.ports: + if cls._is_ovn_metadata_port(port, network.id): + return False + all_subnets = cls._get_all_subnets(network) dhcp_subnets = [s for s in all_subnets if s.enable_dhcp] if not dhcp_subnets: diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 8268d17e140..367eebc156b 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -152,10 +152,27 @@ fake_ipv6_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', device_owner='', + device_id='', mac_address='aa:bb:cc:dd:ee:99', network_id=FAKE_NETWORK_UUID, fixed_ips=[fake_fixed_ipv6]) +fake_ovn_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', + device_owner='', + device_id='', + mac_address='aa:bb:cc:dd:ee:98', + network_id=FAKE_NETWORK_UUID, + fixed_ips=[fake_fixed_ip2]) + +fake_ovn_metadata_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', + device_owner=const. 
+ DEVICE_OWNER_DISTRIBUTED, + device_id='ovnmeta-{}'.format( + FAKE_NETWORK_UUID), + mac_address='aa:bb:cc:dd:ee:99', + network_id=FAKE_NETWORK_UUID, + fixed_ips=[fake_fixed_ip1]) + fake_meta_port = dhcp.DictModel(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id=FAKE_NETWORK_UUID, @@ -191,6 +208,12 @@ subnets=[fake_ipv6_subnet], ports=[fake_ipv6_port]) +fake_ovn_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, + project_id=FAKE_PROJECT_ID, + admin_state_up=True, + subnets=[fake_ipv6_subnet], + ports=[fake_ovn_metadata_port, fake_ovn_port]) + fake_network_ipv6_ipv4 = dhcp.NetModel( id=FAKE_NETWORK_UUID, project_id=FAKE_PROJECT_ID, @@ -850,7 +873,7 @@ def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS): default_cmd_callback=mock.ANY) def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, - is_isolated_network=False): + is_isolated_network=False, is_ovn_network=False): self.dhcp._process_monitor = mock.Mock() if enable_isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) @@ -860,7 +883,8 @@ def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, mock.call.get_network_info(network.id)]) self.call_driver.assert_called_once_with('enable', network) self.cache.assert_has_calls([mock.call.put(network)]) - if is_isolated_network and enable_isolated_metadata: + if (is_isolated_network and enable_isolated_metadata and not + is_ovn_network): self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable()], any_order=True) @@ -905,6 +929,21 @@ def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self): enable_isolated_metadata=True, is_isolated_network=False) + def test_enable_dhcp_helper_enable_metadata_ovn_network(self): + # Metadata should not be enabled when the dhcp agent is used + # in ML2/OVN where the ovn metadata agent is responsible for the + # metadata service. 
+ self._enable_dhcp_helper(fake_ovn_network, is_ovn_network=True) + + def test_enable_dhcp_helper_ovn_network_with_enable_isolated_metadata( + self): + # Metadata should not be enabled when the dhcp agent is used + # in ML2/OVN where the ovn metadata agent is responsible for the + # metadata service. Even if the enable_isolated_metadata is enabled + self._enable_dhcp_helper(fake_ovn_network, + enable_isolated_metadata=True, + is_ovn_network=True) + def test_enable_dhcp_helper_enable_metadata_empty_network(self): self._enable_dhcp_helper(empty_network, enable_isolated_metadata=True, diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index db5ced7af22..d33b479461d 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -88,6 +88,19 @@ def __init__(self): self.extra_dhcp_opts = [] +class FakeOvnMetadataPort(object): + def __init__(self): + self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' + self.admin_state_up = True + self.device_owner = constants.DEVICE_OWNER_DISTRIBUTED + self.fixed_ips = [ + FakeIPAllocation('192.168.0.10', + 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + self.mac_address = '00:00:80:aa:bb:ee' + self.device_id = 'ovnmeta-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.extra_dhcp_opts = [] + + class FakeReservedPort(object): def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'): self.admin_state_up = True @@ -755,6 +768,14 @@ def __init__(self): self.namespace = 'qdhcp-ns' +class FakeNetworkDhcpandOvnMetadataPort(object): + def __init__(self): + self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.subnets = [FakeV4Subnet()] + self.ports = [FakePort1(), FakeDhcpPort(), FakeOvnMetadataPort()] + self.namespace = 'qdhcp-ns' + + class FakeDualNetworkGatewayRoute(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' @@ -3050,6 +3071,10 @@ def test_has_metadata_subnet_returns_false(self): 
self.assertFalse(dhcp.Dnsmasq.has_metadata_subnet( [FakeV4Subnet()])) + def test_should_enable_metadata_ovn_metadata_port_returns_false(self): + self.assertFalse(dhcp.Dnsmasq.should_enable_metadata( + self.conf, FakeNetworkDhcpandOvnMetadataPort())) + def test_should_enable_metadata_isolated_network_returns_true(self): self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkNoRouter())) @@ -3098,6 +3123,12 @@ def test__generate_opts_per_subnet_no_metadata(self): 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, False) + def test__generate_opts_per_subnet_with_metadata_port(self): + config = {'enable_isolated_metadata': False, + 'force_metadata': False} + self._test__generate_opts_per_subnet_helper(config, True, + network_class=FakeNetworkDhcpandOvnMetadataPort) + def test__generate_opts_per_subnet_isolated_metadata_with_router(self): config = {'enable_isolated_metadata': True, 'force_metadata': False} diff --git a/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml b/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml new file mode 100644 index 00000000000..49f73c68eab --- /dev/null +++ b/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixed the scenario where the DHCP agent is deployed in conjunction with + the OVN metadata agent in order to serve metadata for baremetal nodes. + In this scenario, the DHCP agent would not set the route needed for the + OVN metadata agent service resulting in baremetal nodes not being able + to query the metadata service. For more information see + `bug 1982569 <https://bugs.launchpad.net/neutron/+bug/1982569>`_. 
From bef1f7a4861a1d19e3a9003ffed303dd47b70707 Mon Sep 17 00:00:00 2001 From: Luis Tomas Bolivar Date: Mon, 13 Nov 2023 16:42:51 +0100 Subject: [PATCH 2/2] Ensure ovn loadbalancer FIPs are centralized upon neutron restarts When neutron server restarts the mac address for NAT entries related to ovn-lb FIPs gets re-added, distributing the traffic that should be centralized and therefore breaking the connectivity. This happens due to the port being down. This patch is ensuring the MAC entry is only being re-added in case the port is UP Closes-Bug: #2042938 Change-Id: I6203009750a4e589eeb808f842cb522d61476179 (cherry picked from commit f2a3020cf0a46dbd896c5f7b4b4f6643d32a6b4a) --- .../drivers/ovn/mech_driver/mech_driver.py | 19 +++++------ .../ovn/mech_driver/test_mech_driver.py | 32 ++++++++++++------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index 081ecd80733..d743ba80f91 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -1048,7 +1048,7 @@ def get_workers(self): # See doc/source/design/ovn_worker.rst for more details. 
return [worker.MaintenanceWorker()] - def _update_dnat_entry_if_needed(self, port_id): + def _update_dnat_entry_if_needed(self, port_id, up=True): """Update DNAT entry if using distributed floating ips.""" if not self.nb_ovn: self.nb_ovn = self._ovn_client._nb_idl @@ -1070,13 +1070,14 @@ def _update_dnat_entry_if_needed(self, port_id): nat['external_mac']})).execute() if ovn_conf.is_ovn_distributed_floating_ip(): - mac = nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY) - if mac and nat['external_mac'] != mac: - LOG.debug("Setting external_mac of port %s to %s", - port_id, mac) - self.nb_ovn.db_set( - 'NAT', nat['_uuid'], ('external_mac', mac)).execute( - check_error=True) + if up: + mac = nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY) + if mac and nat['external_mac'] != mac: + LOG.debug("Setting external_mac of port %s to %s", + port_id, mac) + self.nb_ovn.db_set( + 'NAT', nat['_uuid'], ('external_mac', mac)).execute( + check_error=True) else: if nat['external_mac']: LOG.debug("Clearing up external_mac of port %s", port_id) @@ -1139,7 +1140,7 @@ def set_port_status_down(self, port_id): # to prevent another entity from bypassing the block with its own # port status update. 
LOG.info("OVN reports status down for port: %s", port_id) - self._update_dnat_entry_if_needed(port_id) + self._update_dnat_entry_if_needed(port_id, False) admin_context = n_context.get_admin_context() try: db_port = ml2_db.get_port(admin_context, port_id) diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py index 8f3d5d26757..b5d4775ee4d 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py @@ -1112,7 +1112,7 @@ def _test_set_port_status_down(self, is_compute_port=False): resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) - ude.assert_called_once_with(port1['port']['id']) + ude.assert_called_once_with(port1['port']['id'], False) # If the port does NOT bellong to compute, do not notify Nova # about it's status changes @@ -1164,7 +1164,7 @@ def test_set_port_status_concurrent_delete(self): resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) - ude.assert_called_once_with(port1['port']['id']) + ude.assert_called_once_with(port1['port']['id'], False) def test_bind_port_unsupported_vnic_type(self): fake_port = fakes.FakePort.create_one_port( @@ -2358,7 +2358,7 @@ def test_agent_with_nb_cfg_timestamp_not_timeout(self): self.assertTrue(agent.alive, "Agent of type %s alive=%s" % ( agent.agent_type, agent.alive)) - def _test__update_dnat_entry_if_needed(self, dvr=True): + def _test__update_dnat_entry_if_needed(self, up=True, dvr=True): if dvr: ovn_conf.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') @@ -2374,25 +2374,33 @@ def _test__update_dnat_entry_if_needed(self, dvr=True): fake_db_find.execute.return_value = [nat_row] self.nb_ovn.db_find.return_value = fake_db_find - self.mech_driver._update_dnat_entry_if_needed(port_id) + self.mech_driver._update_dnat_entry_if_needed(port_id, up=up) - if dvr: + if up and dvr: # 
Assert that we are setting the external_mac in the NAT table self.nb_ovn.db_set.assert_called_once_with( 'NAT', fake_nat_uuid, ('external_mac', fake_ext_mac_key)) - self.nb_ovn.db_clear.assert_not_called() else: - self.nb_ovn.db_set.assert_not_called() - # Assert that we are cleaning the external_mac from the NAT table - self.nb_ovn.db_clear.assert_called_once_with( - 'NAT', fake_nat_uuid, 'external_mac') + if dvr: + self.nb_ovn.db_set.assert_not_called() + else: + # Assert that we are cleaning the external_mac from the NAT + # table + self.nb_ovn.db_clear.assert_called_once_with( + 'NAT', fake_nat_uuid, 'external_mac') - def test__update_dnat_entry_if_needed_dvr(self): + def test__update_dnat_entry_if_needed_up_dvr(self): self._test__update_dnat_entry_if_needed() - def test__update_dnat_entry_if_needed_no_dvr(self): + def test__update_dnat_entry_if_needed_up_no_dvr(self): self._test__update_dnat_entry_if_needed(dvr=False) + def test__update_dnat_entry_if_needed_down_dvr(self): + self._test__update_dnat_entry_if_needed(up=False) + + def test__update_dnat_entry_if_needed_down_no_dvr(self): + self._test__update_dnat_entry_if_needed(up=False, dvr=False) + @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_router_ports') def _test_update_network_fragmentation(self, new_mtu, expected_opts, grps):