From 8e6c18916ec3d732442aba853c27a44924e1adac Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 3 Feb 2023 21:06:00 +0100 Subject: [PATCH 1/3] Filter subnets by "enable_dhcp" flag using the correct type While other SQL engines can compare integer and boolean types, PostgreSQL needs explicit casting to compare variables. Method "_sync_subnet_dhcp_options" is currently raising the following error: operator does not exist: boolean = integer Closes-Bug: #2004581 Change-Id: I715029c311c4516f3212054c5c72533b12fd0986 (cherry picked from commit 61b2917a3e8596cbb4074024cee0844e319ed551) --- .../plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py index 0de899d024e..d8bf3b74646 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -769,7 +769,7 @@ def _sync_subnet_dhcp_options(self, ctx, db_networks, LOG.debug('OVN-NB Sync DHCP options for Neutron subnets started') db_subnets = {} - filters = {'enable_dhcp': [1]} + filters = {'enable_dhcp': [True]} for subnet in self.core_plugin.get_subnets(ctx, filters=filters): if (subnet['ip_version'] == constants.IP_VERSION_6 and subnet.get('ipv6_address_mode') == constants.IPV6_SLAAC): From 7dcf8be112ed205a6c694c1f3549e08b4234d82d Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 8 Feb 2023 13:14:19 +0100 Subject: [PATCH 2/3] Improve scheduling L3/DHCP agents, missing lower binding indexes This patch is covering an edge case that could happen when the number of DHCP agents ("dhcp_agents_per_network") or L3 agents ("max_l3_agents_per_router") has been reduced and there are more agents assigned than the current number. 
If the user removes any agent assignment from an L3 router or a DHCP agent, it is possible to remove the lower assigned binding registers first. Now the method ``get_vacant_binding_index`` calculates the number of agents bound and the number required. If a new one is needed, the method returns the lowest unused binding indexes first. Closes-Bug: #2006496 Conflicts: neutron/common/_constants.py neutron/objects/l3agent.py Change-Id: I25145c088ffdca47acfcb7add02b1a4a615e4612 (cherry picked from commit 5250598c804a38c55ff78cfb457b73d1b3cd7e07) (cherry picked from commit 0920f17f476ce8b398deea3e54e9f90b5251cfc9) --- neutron/common/_constants.py | 3 ++ neutron/db/l3_agentschedulers_db.py | 4 +- neutron/db/models/l3agent.py | 8 ++-- .../db/network_dhcp_agent_binding/models.py | 9 ++-- neutron/objects/l3agent.py | 3 +- neutron/scheduler/base_scheduler.py | 46 +++++++++++++------ neutron/scheduler/dhcp_agent_scheduler.py | 6 +-- neutron/scheduler/l3_agent_scheduler.py | 5 +- .../unit/scheduler/test_base_scheduler.py | 21 +++++++++ 9 files changed, 75 insertions(+), 30 deletions(-) diff --git a/neutron/common/_constants.py b/neutron/common/_constants.py index b517007e542..3f8eb0c0240 100644 --- a/neutron/common/_constants.py +++ b/neutron/common/_constants.py @@ -78,3 +78,6 @@ AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP, constants.DEVICE_OWNER_DISTRIBUTED, constants.DEVICE_OWNER_AGENT_GW] + +# The lowest binding index for L3 agents and DHCP agents. 
+LOWEST_AGENT_BINDING_INDEX = 1 diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index 51cb8ca249e..6a2be2c48c6 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -25,9 +25,9 @@ import oslo_messaging from neutron.agent.common import utils as agent_utils +from neutron.common import _constants as n_const from neutron.conf.db import l3_agentschedulers_db from neutron.db import agentschedulers_db -from neutron.db.models import l3agent as rb_model from neutron.extensions import l3agentscheduler from neutron.extensions import router_availability_zone as router_az from neutron.objects import agent as ag_obj @@ -522,7 +522,7 @@ def get_vacant_binding_index(self, context, router_id, bindings = rb_obj.RouterL3AgentBinding.get_objects( context, _pager=pager, router_id=router_id) return base_scheduler.get_vacant_binding_index( - num_agents, bindings, rb_model.LOWEST_BINDING_INDEX, + num_agents, bindings, n_const.LOWEST_AGENT_BINDING_INDEX, force_scheduling=is_manual_scheduling) diff --git a/neutron/db/models/l3agent.py b/neutron/db/models/l3agent.py index 8ca12b3786d..97c68ea7dc7 100644 --- a/neutron/db/models/l3agent.py +++ b/neutron/db/models/l3agent.py @@ -15,10 +15,9 @@ import sqlalchemy as sa from sqlalchemy import orm +from neutron.common import _constants as n_const from neutron.db.models import agent as agent_model -LOWEST_BINDING_INDEX = 1 - class RouterL3AgentBinding(model_base.BASEV2): """Represents binding between neutron routers and L3 agents.""" @@ -37,5 +36,6 @@ class RouterL3AgentBinding(model_base.BASEV2): l3_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) - binding_index = sa.Column(sa.Integer, nullable=False, - server_default=str(LOWEST_BINDING_INDEX)) + binding_index = sa.Column( + sa.Integer, nullable=False, + server_default=str(n_const.LOWEST_AGENT_BINDING_INDEX)) diff --git 
a/neutron/db/network_dhcp_agent_binding/models.py b/neutron/db/network_dhcp_agent_binding/models.py index d3147df53fd..d30f61a65b3 100644 --- a/neutron/db/network_dhcp_agent_binding/models.py +++ b/neutron/db/network_dhcp_agent_binding/models.py @@ -14,12 +14,10 @@ import sqlalchemy as sa from sqlalchemy import orm +from neutron.common import _constants as n_const from neutron.db.models import agent as agent_model -LOWEST_BINDING_INDEX = 1 - - class NetworkDhcpAgentBinding(model_base.BASEV2): """Represents binding between neutron networks and DHCP agents.""" @@ -38,5 +36,6 @@ class NetworkDhcpAgentBinding(model_base.BASEV2): sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) - binding_index = sa.Column(sa.Integer, nullable=False, - server_default=str(LOWEST_BINDING_INDEX)) + binding_index = sa.Column( + sa.Integer, nullable=False, + server_default=str(n_const.LOWEST_AGENT_BINDING_INDEX)) diff --git a/neutron/objects/l3agent.py b/neutron/objects/l3agent.py index 07606e76f3e..0c86e7973de 100644 --- a/neutron/objects/l3agent.py +++ b/neutron/objects/l3agent.py @@ -17,6 +17,7 @@ from sqlalchemy import sql +from neutron.common import _constants as n_const from neutron.db.models import agent as agent_model from neutron.db.models import l3_attrs from neutron.db.models import l3agent @@ -36,7 +37,7 @@ class RouterL3AgentBinding(base.NeutronDbObject): 'router_id': common_types.UUIDField(), 'l3_agent_id': common_types.UUIDField(), 'binding_index': obj_fields.IntegerField( - default=l3agent.LOWEST_BINDING_INDEX), + default=n_const.LOWEST_AGENT_BINDING_INDEX), } # TODO(ihrachys) return OVO objects not models diff --git a/neutron/scheduler/base_scheduler.py b/neutron/scheduler/base_scheduler.py index 35eb2e348de..a8cd7741bbd 100644 --- a/neutron/scheduler/base_scheduler.py +++ b/neutron/scheduler/base_scheduler.py @@ -99,24 +99,44 @@ def get_vacant_binding_index(num_agents, bindings, lowest_binding_index, always return an index, even if this number exceeds the 
maximum configured number of agents. """ - binding_indices = [b.binding_index for b in bindings] - all_indices = set(range(lowest_binding_index, num_agents + 1)) - open_slots = sorted(list(all_indices - set(binding_indices))) + def get_open_slots(binding_indices, lowest_binding_index, max_number): + """Returns an ordered list of free slots + + This list starts from the lowest available binding index. The number + of open slots and "binding_indices" (those already taken) must be + equal to "max_number". The list returned can be [], if + max_number == len(binding_indices) (that means there are no free + slots). + """ + # NOTE(ralonsoh): check LP#2006496 for more context. The DHCP/router + # binding indexes might not be a sequential list starting from + # lowest_binding_index (that is usually 1). + open_slots = set(binding_indices) + idx = lowest_binding_index + while len(open_slots) < max_number: + # Increase sequentially the "open_slots" set until we have the + # required number of slots, that is "max_number". + open_slots.add(idx) + idx += 1 + + # Remove those indices already used. + open_slots -= set(binding_indices) + return sorted(list(open_slots)) + binding_indices = [b.binding_index for b in bindings] + open_slots = get_open_slots(binding_indices, lowest_binding_index, + num_agents) if open_slots: return open_slots[0] if not force_scheduling: return -1 - # Last chance: if this is a manual scheduling, we're gonna allow + # Last chance: if this is a manual scheduling, we're going to allow # creation of a binding_index even if it will exceed - # dhcp_agents_per_network. - if max(binding_indices) == len(binding_indices): - return max(binding_indices) + 1 - else: - # Find binding index set gaps and return first free one. - all_indices = set(range(lowest_binding_index, - max(binding_indices) + 1)) - open_slots = sorted(list(all_indices - set(binding_indices))) - return open_slots[0] + # dhcp_agents_per_network/max_l3_agents_per_router. 
+ while not open_slots: + num_agents += 1 + open_slots = get_open_slots(binding_indices, lowest_binding_index, + num_agents) + return open_slots[0] diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py index 6b835a6bcc4..0cd9c1a9470 100644 --- a/neutron/scheduler/dhcp_agent_scheduler.py +++ b/neutron/scheduler/dhcp_agent_scheduler.py @@ -25,7 +25,7 @@ from oslo_log import log as logging from neutron.agent.common import utils as agent_utils -from neutron.db.network_dhcp_agent_binding import models as ndab_model +from neutron.common import _constants as n_const from neutron.objects import agent as agent_obj from neutron.objects import network from neutron.scheduler import base_resource_filter @@ -195,7 +195,7 @@ def get_vacant_network_dhcp_agent_binding_index( bindings = network.NetworkDhcpAgentBinding.get_objects( context, network_id=network_id) return base_scheduler.get_vacant_binding_index( - num_agents, bindings, ndab_model.LOWEST_BINDING_INDEX, + num_agents, bindings, n_const.LOWEST_AGENT_BINDING_INDEX, force_scheduling=force_scheduling) def bind(self, context, agents, network_id, force_scheduling=False): @@ -205,7 +205,7 @@ def bind(self, context, agents, network_id, force_scheduling=False): for agent in agents: binding_index = self.get_vacant_network_dhcp_agent_binding_index( context, network_id, force_scheduling) - if binding_index < ndab_model.LOWEST_BINDING_INDEX: + if binding_index < n_const.LOWEST_AGENT_BINDING_INDEX: LOG.debug('Unable to find a vacant binding_index for ' 'network %(network_id)s and agent %(agent_id)s', {'network_id': network_id, diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py index 632cd7cce84..8f51abc4ffa 100644 --- a/neutron/scheduler/l3_agent_scheduler.py +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -26,6 +26,7 @@ from oslo_db import exception as db_exc from oslo_log import log as logging +from neutron.common import _constants as n_const 
from neutron.common import utils from neutron.conf.db import l3_hamode_db from neutron.db.models import l3agent as rb_model @@ -185,7 +186,7 @@ def bind_router(self, plugin, context, router_id, agent_id, return if not is_ha: - binding_index = rb_model.LOWEST_BINDING_INDEX + binding_index = n_const.LOWEST_AGENT_BINDING_INDEX if rb_obj.RouterL3AgentBinding.objects_exist( context, router_id=router_id, binding_index=binding_index): LOG.debug('Non-HA router %s has already been scheduled', @@ -194,7 +195,7 @@ def bind_router(self, plugin, context, router_id, agent_id, else: binding_index = plugin.get_vacant_binding_index( context, router_id, is_manual_scheduling) - if binding_index < rb_model.LOWEST_BINDING_INDEX: + if binding_index < n_const.LOWEST_AGENT_BINDING_INDEX: LOG.debug('Unable to find a vacant binding_index for ' 'router %(router_id)s and agent %(agent_id)s', {'router_id': router_id, diff --git a/neutron/tests/unit/scheduler/test_base_scheduler.py b/neutron/tests/unit/scheduler/test_base_scheduler.py index 2acae6ab803..6a6a82f2bb8 100644 --- a/neutron/tests/unit/scheduler/test_base_scheduler.py +++ b/neutron/tests/unit/scheduler/test_base_scheduler.py @@ -38,6 +38,21 @@ def test_get_vacant_binding_index_several_agents(self): 3, [mock.Mock(binding_index=1), mock.Mock(binding_index=3)], 1) self.assertEqual(2, ret) + # Binding list starting in 2, two elements, required three. + ret = base_scheduler.get_vacant_binding_index( + 3, [mock.Mock(binding_index=2), mock.Mock(binding_index=3)], 1) + self.assertEqual(1, ret) + + # Binding list starting in 2, two elements, required two. + ret = base_scheduler.get_vacant_binding_index( + 2, [mock.Mock(binding_index=2), mock.Mock(binding_index=3)], 1) + self.assertEqual(-1, ret) + + # Binding list starting in 2, two elements, required one. 
+ ret = base_scheduler.get_vacant_binding_index( + 1, [mock.Mock(binding_index=2), mock.Mock(binding_index=3)], 1) + self.assertEqual(-1, ret) + def test_get_vacant_binding_index_force_scheduling(self): ret = base_scheduler.get_vacant_binding_index( 3, [mock.Mock(binding_index=1), mock.Mock(binding_index=2), @@ -50,3 +65,9 @@ def test_get_vacant_binding_index_force_scheduling(self): mock.Mock(binding_index=3), mock.Mock(binding_index=4), mock.Mock(binding_index=5)], 1, force_scheduling=True) self.assertEqual(6, ret) + + ret = base_scheduler.get_vacant_binding_index( + 3, [mock.Mock(binding_index=2), mock.Mock(binding_index=3), + mock.Mock(binding_index=4), mock.Mock(binding_index=5), + mock.Mock(binding_index=6)], 1, force_scheduling=True) + self.assertEqual(1, ret) From 9df4a7e398fa69315a759737bbb991c892c73e80 Mon Sep 17 00:00:00 2001 From: Bence Romsics Date: Fri, 20 Jan 2023 16:26:21 +0100 Subject: [PATCH 3/3] Do not ignore attributes in bulk port create With unit tests that would have caught the bug. 
Change-Id: Ia4a68bdccecfbcb9d1aa49e2b14e06d139891c0f Closes-Bug: #2003553 (cherry picked from commit ed68ba4a4cb622a67a4b28065da7f1ef7777e0dd) (cherry picked from commit 1b086f1c1fd0879224e61b05e3d4bb54486ea62c) --- neutron/plugins/ml2/plugin.py | 6 +-- neutron/tests/unit/plugins/ml2/test_plugin.py | 49 +++++++++++++++++++ 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index f7ef2e10963..203f74fc477 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1717,13 +1717,13 @@ def _create_port_bulk(self, context, port_list, network_cache): self._process_port_binding(mech_context, port_dict) # process allowed address pairs - db_port_obj[addr_apidef.ADDRESS_PAIRS] = ( + port_dict[addr_apidef.ADDRESS_PAIRS] = ( self._process_create_allowed_address_pairs( context, port_dict, - port_dict.get(addr_apidef.ADDRESS_PAIRS))) + pdata.get(addr_apidef.ADDRESS_PAIRS))) # handle DHCP setup - dhcp_opts = port_dict.get(edo_ext.EXTRADHCPOPTS, []) + dhcp_opts = pdata.get(edo_ext.EXTRADHCPOPTS, []) self._process_port_create_extra_dhcp_opts(context, port_dict, dhcp_opts) # send PRECOMMIT_CREATE notification diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 31cddfc7c13..624fda289b0 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -1688,6 +1688,55 @@ def test_create_ports_bulk_ip_allocation_without_mac_no_net(self): ctx, ports ) + def test_create_ports_bulk_with_allowed_address_pairs(self): + ctx = context.get_admin_context() + with self.network() as net: + + aap = [{ + 'ip_address': '1.2.3.4', + 'mac_address': '01:23:45:67:89:ab', + }] + ports_in = { + 'ports': [{'port': { + 'allowed_address_pairs': aap, + 'network_id': net['network']['id'], + 'project_id': self._tenant_id, + + 'admin_state_up': True, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': 
constants.ATTR_NOT_SPECIFIED, + 'name': '', + 'security_groups': constants.ATTR_NOT_SPECIFIED, + }}]} + ports_out = self.plugin.create_port_bulk(ctx, ports_in) + self.assertEqual(aap, ports_out[0]['allowed_address_pairs']) + + def test_create_ports_bulk_with_extra_dhcp_opts(self): + ctx = context.get_admin_context() + with self.network() as net: + + edo = [{ + 'opt_name': 'domain-name-servers', + 'opt_value': '10.0.0.1', + 'ip_version': 4, + }] + ports_in = { + 'ports': [{'port': { + 'extra_dhcp_opts': edo, + 'network_id': net['network']['id'], + 'project_id': self._tenant_id, + + 'admin_state_up': True, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': constants.ATTR_NOT_SPECIFIED, + 'name': '', + 'security_groups': constants.ATTR_NOT_SPECIFIED, + }}]} + ports_out = self.plugin.create_port_bulk(ctx, ports_in) + self.assertEqual(edo, ports_out[0]['extra_dhcp_opts']) + def test_delete_port_no_notify_in_disassociate_floatingips(self): ctx = context.get_admin_context() plugin = directory.get_plugin()