From 0b7af2ba5378bb6d4b2cde1b3564a268107a97c0 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 4 Jun 2024 12:23:27 -0400 Subject: [PATCH 1/4] Fix regex lines in zuul.d/* files Commit 260c968118934 broke the gate by causing jobs to not get run when it added RE2 compatibility for irrelevant-files. Digging found that RE2 doesn't support negative lookahead (and won't ever), so it's impossible to replace the previous pcre filter with a similar RE2 filter. Instead of reverting to the original filter, which is considered obsolete by zuul, this patch fixes the issue by explicitly listing all files under zuul.d/ except the one that we actually want to trigger the jobs: zuul.d/project.yaml. Listing all the files in the directory for every job is not ideal, and we may revisit it later, or perhaps even reconsider the extensive use of irrelevant-files in the neutron tree. This will have to wait for when the gate is in better shape though. [0] https://github.com/google/re2/issues/156 Conflicts: zuul.d/job-templates.yaml zuul.d/project.yaml Related-bug: #2065821 Change-Id: I3bba89ac14414c6b7d375072ae92d2e0b5497736 (cherry picked from commit 11027e3e1ef9a58d5b2faa575a3764bd33cd2a08) (cherry picked from commit 0afdfb0ad5a4b2ede06624b8732a0b14965475df) (cherry picked from commit af8d6155179753e1a6a7eddcc2bd5251879aca01) --- zuul.d/base.yaml | 16 ++++++++++++-- zuul.d/grenade.yaml | 16 ++++++++++++-- zuul.d/job-templates.yaml | 8 ++++++- zuul.d/project.yaml | 8 ++++++- zuul.d/rally.yaml | 16 ++++++++++++-- zuul.d/tempest-multinode.yaml | 16 ++++++++++++-- zuul.d/tempest-singlenode.yaml | 40 +++++++++++++++++++++++++++++----- 7 files changed, 105 insertions(+), 15 deletions(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 73b93f82071..df0256625e7 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -33,7 +33,13 @@ - ^roles/add_mariadb_repo/.*$ - ^roles/nftables/.*$ - ^rally-jobs/.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml 
+ - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml vars: configure_swap_size: 8192 Q_BUILD_OVS_FROM_GIT: True @@ -96,7 +102,13 @@ - ^roles/add_mariadb_repo/.*$ - ^roles/nftables/.*$ - ^rally-jobs/.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-fullstack-with-uwsgi diff --git a/zuul.d/grenade.yaml b/zuul.d/grenade.yaml index 9a30c984734..6268cc23bbe 100644 --- a/zuul.d/grenade.yaml +++ b/zuul.d/grenade.yaml @@ -34,7 +34,13 @@ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - ^vagrant/.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml vars: grenade_devstack_localrc: shared: @@ -249,7 +255,13 @@ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - ^vagrant/.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml roles: - zuul: openstack/neutron-tempest-plugin required-projects: diff --git a/zuul.d/job-templates.yaml b/zuul.d/job-templates.yaml index b5ee5374652..cea3a7fd15c 100644 --- a/zuul.d/job-templates.yaml +++ b/zuul.d/job-templates.yaml @@ -20,7 +20,13 @@ - ^playbooks/.*$ - ^roles/.*$ - ^rally-jobs/.*$ - - ^zuul.d/(?!(job-templates)).*\.yaml + # Ignore everything except for zuul.d/job-templates.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/project.yaml + - ^zuul.d/rally.yaml 
+ - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - openstack-tox-py310: # from openstack-python3-jobs template timeout: 3600 irrelevant-files: *irrelevant-files diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 37e2fd06278..2fae8106511 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -65,7 +65,13 @@ - ^neutron/scheduler/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - neutron-ovn-tempest-ovs-release-ubuntu-old: irrelevant-files: *ovn-irrelevant-files diff --git a/zuul.d/rally.yaml b/zuul.d/rally.yaml index c3c70ff8919..9b8082756e8 100644 --- a/zuul.d/rally.yaml +++ b/zuul.d/rally.yaml @@ -80,7 +80,13 @@ - ^neutron/common/ovn/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovn-rally-task @@ -123,7 +129,13 @@ - ^neutron/scheduler/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml vars: devstack_plugins: neutron: https://opendev.org/openstack/neutron diff --git a/zuul.d/tempest-multinode.yaml b/zuul.d/tempest-multinode.yaml index 2056939dbcb..0d7e70cd6ca 100644 --- a/zuul.d/tempest-multinode.yaml +++ b/zuul.d/tempest-multinode.yaml @@ -73,7 +73,13 @@ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - 
^vagrant/.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml vars: tox_envlist: integrated-network devstack_localrc: @@ -399,7 +405,13 @@ - ^neutron/scheduler/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml roles: - zuul: zuul/zuul-jobs - zuul: openstack/neutron-tempest-plugin diff --git a/zuul.d/tempest-singlenode.yaml b/zuul.d/tempest-singlenode.yaml index 7721eddaab9..8e26068a024 100644 --- a/zuul.d/tempest-singlenode.yaml +++ b/zuul.d/tempest-singlenode.yaml @@ -83,7 +83,13 @@ - ^neutron/common/ovn/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovs-tempest-dvr @@ -145,7 +151,13 @@ - ^neutron/common/ovn/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovs-tempest-iptables_hybrid @@ -248,7 +260,13 @@ - ^neutron/plugins/ml2/drivers/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - 
^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovn-tempest-mariadb-full @@ -333,7 +351,13 @@ - ^vagrant/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovn-tempest-with-uwsgi-loki @@ -473,7 +497,13 @@ - ^neutron/scheduler/.*$ - ^roles/.*functional.*$ - ^playbooks/.*functional.*$ - - ^zuul.d/(?!(project)).*\.yaml + # Ignore everything except for zuul.d/project.yaml + - ^zuul.d/base.yaml + - ^zuul.d/grenade.yaml + - ^zuul.d/job-templates.yaml + - ^zuul.d/rally.yaml + - ^zuul.d/tempest-multinode.yaml + - ^zuul.d/tempest-singlenode.yaml - job: name: neutron-ovn-tempest-ovs-release From dfd6696a54fca3ffc776a565f342efa18b65796a Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 26 Jul 2024 12:02:27 +0200 Subject: [PATCH 2/4] Fix setting correct 'reside-on-chassis-redirect' in the maintenance task Setting of the 'reside-on-chassis-redirect' was skipped for LRP ports of the provider tenant networks in patch [1] but later patch [2] removed this limitation from the ovn_client but not from the maintenance task. Due to that this option wasn't updated after e.g. change of the 'enable_distributed_floating_ip' config option and connectivity to the existing Floating IPs associated to the ports in vlan tenant networks was broken. This patch removes that limitation and this option is now updated for all of the Logical_Router_Ports for vlan networks, not only for external gateways. 
[1] https://review.opendev.org/c/openstack/neutron/+/871252 [2] https://review.opendev.org/c/openstack/neutron/+/878450 Conflicts: neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py Closes-bug: #2073987 Change-Id: I56e791847c8f4f3a07f543689bf22fde8160c9b7 (cherry picked from commit 4b1bfb93e380b8dce78935395b2cda57076e5476) --- .../plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py | 5 +---- .../plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py | 2 -- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py index 2112e023f6f..c3b2fb31056 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py @@ -772,10 +772,7 @@ def check_vlan_distributed_ports(self): # Get router ports belonging to VLAN networks vlan_nets = self._ovn_client._plugin.get_networks( context, {pnet.NETWORK_TYPE: [n_const.TYPE_VLAN]}) - # FIXME(ltomasbo): Once Bugzilla 2162756 is fixed the - # is_provider_network check should be removed - vlan_net_ids = [vn['id'] for vn in vlan_nets - if not utils.is_provider_network(vn)] + vlan_net_ids = [vn['id'] for vn in vlan_nets] router_ports = self._ovn_client._plugin.get_ports( context, {'network_id': vlan_net_ids, 'device_owner': n_const.ROUTER_PORT_OWNERS}) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index b94fea7f014..5fb56994bf8 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -1610,8 +1610,6 @@ def _gen_router_port_options(self, port, network=None): # logical router port is centralized in the chassis hosting the # distributed gateway port. 
# https://github.com/openvswitch/ovs/commit/85706c34d53d4810f54bec1de662392a3c06a996 - # FIXME(ltomasbo): Once Bugzilla 2162756 is fixed the - # is_provider_network check should be removed if network.get(pnet.NETWORK_TYPE) == const.TYPE_VLAN: reside_redir_ch = self._get_reside_redir_for_gateway_port( port['device_id']) From 406b1e00c4caa96b7933a6701ded255ac7874438 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 30 Jul 2024 14:17:44 +0200 Subject: [PATCH 3/4] Lower spacing time of the OVN maintenance tasks which should be run once Some of the OVN maintenance tasks are expected to be run just once and then they raise periodic.NeverAgain() to not be run anymore. Those tasks also require to have acquried ovn db lock so that only one of the maintenance workers really runs them. All those tasks had set 600 seconds as a spacing time so they were run every 600 seconds. This works fine usually but that may cause small issue in the environments were Neutron is run in POD as k8s/openshift application. In such case, when e.g. configuration of neutron is updated, it may happen that first new POD with Neutron is spawned and only once it is already running, k8s will stop old POD. Because of that maintenance worker running in the new neutron-server POD will not acquire lock on the OVN DB (old POD still holds the lock) and will not run all those maintenance tasks immediately. After old POD will be terminated, one of the new PODs will at some point acquire that lock and then will run all those maintenance tasks but this would cause 600 seconds delay in running them. To avoid such long waiting time to run those maintenance tasks, this patch lowers its spacing time from 600 to just 5 seconds. Additionally maintenance tasks which are supposed to be run only once and only by the maintenance worker which has acquired ovn db lock will now be stopped (periodic.NeverAgain will be raised) after 100 attempts of run. 
This will avoid running them every 5 seconds forever on the workers which don't acquire the lock on the OVN DB at all.
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py index 2112e023f6f..9829dcecea1 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py @@ -54,14 +54,28 @@ INCONSISTENCY_TYPE_DELETE = 'delete' -def has_lock_periodic(*args, **kwargs): +def has_lock_periodic(*args, periodic_run_limit=0, **kwargs): def wrapper(f): + _retries = 0 + @functools.wraps(f) @periodics.periodic(*args, **kwargs) def decorator(self, *args, **kwargs): # This periodic task is included in DBInconsistenciesPeriodics # since it uses the lock to ensure only one worker is executing + # additonally, if periodic_run_limit parameter with value > 0 is + # provided and lock is not acquired for periodic_run_limit + # times, task will not be run anymore by this maintenance worker + nonlocal _retries if not self.has_lock: + if periodic_run_limit > 0: + if _retries >= periodic_run_limit: + LOG.debug("Have not been able to acquire lock to run " + "task '%s' after %s tries, limit reached. " + "No more attempts will be made.", + f, _retries) + raise periodics.NeverAgain() + _retries += 1 return return f(self, *args, **kwargs) return decorator @@ -481,7 +495,10 @@ def _delete_floatingip_and_pf(self, context, fip_id): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). 
- @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_global_dhcp_opts(self): if (not ovn_conf.get_global_dhcpv4_opts() and not ovn_conf.get_global_dhcpv6_opts()): @@ -511,7 +528,10 @@ def check_global_dhcp_opts(self): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_for_igmp_snoop_support(self): with self._nb_idl.transaction(check_error=True) as txn: value = ('true' if ovn_conf.is_igmp_snooping_enabled() @@ -531,7 +551,10 @@ def check_for_igmp_snoop_support(self): # TODO(czesla): Remove this in the A+4 cycle # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_port_has_address_scope(self): ports = self._nb_idl.db_find_rows( "Logical_Switch_Port", ("type", "!=", ovn_const.LSP_TYPE_LOCALNET) @@ -574,7 +597,10 @@ def _delete_default_ha_chassis_group(self, txn): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). 
- @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_for_ha_chassis_group(self): # If external ports is not supported stop running # this periodic task @@ -604,7 +630,10 @@ def check_for_ha_chassis_group(self): # TODO(lucasagomes): Remove this in the B+3 cycle # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_for_mcast_flood_reports(self): cmds = [] for port in self._nb_idl.lsp_list().execute(check_error=True): @@ -703,7 +732,10 @@ def update_port_qos_with_external_ids_reference(self): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_redirect_type_router_gateway_ports(self): """Check OVN router gateway ports Check for the option "redirect-type=bridged" value for @@ -761,7 +793,10 @@ def check_redirect_type_router_gateway_ports(self): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). 
- @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_vlan_distributed_ports(self): """Check VLAN distributed ports Check for the option "reside-on-redirect-chassis" value for @@ -802,7 +837,10 @@ def check_vlan_distributed_ports(self): # a gateway (that means, that has "external_ids:OVN_GW_PORT_EXT_ID_KEY"). # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def update_logical_router_with_gateway_network_id(self): """Update all OVN logical router registers with the GW network ID""" cmds = [] @@ -829,7 +867,10 @@ def update_logical_router_with_gateway_network_id(self): # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_baremetal_ports_dhcp_options(self): """Update baremetal ports DHCP options @@ -912,7 +953,10 @@ def update_port_virtual_type(self): raise periodics.NeverAgain() # TODO(ralonsoh): Remove this in the Antelope+4 cycle - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def create_router_extra_attributes_registers(self): """Create missing ``RouterExtraAttributes`` registers. 
@@ -933,7 +977,10 @@ def create_router_extra_attributes_registers(self): raise periodics.NeverAgain() # TODO(slaweq): Remove this in the E cycle (C+2 as it will be next SLURP) - @periodics.periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def add_gw_port_info_to_logical_router_port(self): """Add info if LRP is connecting internal subnet or ext gateway.""" cmds = [] @@ -969,7 +1016,10 @@ def add_gw_port_info_to_logical_router_port(self): txn.add(cmd) raise periodics.NeverAgain() - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_router_default_route_empty_dst_ip(self): """Check routers with default route with empty dst-ip (LP: #2002993). """ @@ -994,7 +1044,10 @@ def check_router_default_route_empty_dst_ip(self): raise periodics.NeverAgain() # TODO(ralonsoh): Remove this in the Antelope+4 cycle - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def add_vnic_type_and_pb_capabilities_to_lsp(self): """Add the port VNIC type and port binding capabilities to the LSP. 
@@ -1026,7 +1079,10 @@ def add_vnic_type_and_pb_capabilities_to_lsp(self): raise periodics.NeverAgain() - @has_lock_periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def check_fair_meter_consistency(self): """Update the logging meter after neutron-server reload @@ -1108,7 +1164,10 @@ def cleanup_old_hash_ring_nodes(self): context = n_context.get_admin_context() hash_ring_db.cleanup_old_nodes(context, days=5) - @periodics.periodic(spacing=600, run_immediately=True) + @has_lock_periodic( + periodic_run_limit=ovn_const.MAINTENANCE_TASK_RETRY_LIMIT, + spacing=ovn_const.MAINTENANCE_ONE_RUN_TASK_SPACING, + run_immediately=True) def update_nat_floating_ip_with_gateway_port_reference(self): """Set NAT rule gateway_port column to any floating IP without router gateway port uuid reference - LP#2035281. diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py index c4530a66ec1..b37d2aa4ea8 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py @@ -32,12 +32,69 @@ from neutron.objects import ports as ports_obj from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync +from neutron.tests import base from neutron.tests.unit import fake_resources as fakes from neutron.tests.unit.plugins.ml2 import test_security_group as test_sg from neutron.tests.unit import testlib_api from neutron_lib import exceptions as n_exc +class TestHasLockPeriodicDecorator(base.BaseTestCase): + + def test_decorator_no_limit_have_lock(self): + run_counter = 0 + + @maintenance.has_lock_periodic( + periodic_run_limit=0, 
spacing=30) + def test_maintenance_task(worker): + nonlocal run_counter + run_counter += 1 + + worker_mock = mock.MagicMock() + worker_mock.has_lock = True + + for _ in range(3): + test_maintenance_task(worker_mock) + self.assertEqual(3, run_counter) + + def test_decorator_no_lock_no_limit(self): + run_counter = 0 + + @maintenance.has_lock_periodic( + periodic_run_limit=0, spacing=30) + def test_maintenance_task(worker): + nonlocal run_counter + run_counter += 1 + + worker_mock = mock.MagicMock() + has_lock_values = [False, False, True] + + for has_lock in has_lock_values: + worker_mock.has_lock = has_lock + test_maintenance_task(worker_mock) + self.assertEqual(1, run_counter) + + def test_decorator_no_lock_with_limit(self): + run_counter = 0 + + @maintenance.has_lock_periodic( + periodic_run_limit=1, spacing=30) + def test_maintenance_task(worker): + nonlocal run_counter + run_counter += 1 + + worker_mock = mock.MagicMock() + + worker_mock.has_lock = False + test_maintenance_task(worker_mock) + self.assertEqual(0, run_counter) + + worker_mock.has_lock = False + self.assertRaises(periodics.NeverAgain, + test_maintenance_task, worker_mock) + self.assertEqual(0, run_counter) + + class TestSchemaAwarePeriodicsBase(testlib_api.SqlTestCaseLight): def test__set_schema_aware_periodics(self): From 271bb48936ef415d2ba3b42ffa2fb07dee8d59b5 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Thu, 19 Jan 2023 17:40:11 +0100 Subject: [PATCH 4/4] rbacs: filter out model that are already owned by context Taking example of a network that have multiple rbacs. In a situation of selecting networks that are shared to a project. If we could could already match the one that are owned by the context we can expect les rbacs to scan. https://bugs.launchpad.net/neutron/+bug/1918145/comments/54 In an environement with about 200 00 rbacs and 200 networks this reduce time of the request from more than 50s to less than a second. 
Related-bug: #1918145 Signed-off-by: Sahid Orentino Ferdjaoui Change-Id: I54808cbd4cdccfee97eb59053418f55ba57e11a6 Signed-off-by: Sahid Orentino Ferdjaoui Change-Id: Ib155fbb3f6b325d10e3fbea201677dc218111c17 (cherry picked from commit e6de524555db92c0d232bc2f6fbc3edfa751a91f) --- neutron/db/external_net_db.py | 2 ++ neutron/tests/unit/extensions/test_external_net.py | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/neutron/db/external_net_db.py b/neutron/db/external_net_db.py index d4798b49202..ef286c61e30 100644 --- a/neutron/db/external_net_db.py +++ b/neutron/db/external_net_db.py @@ -51,6 +51,8 @@ def _network_filter_hook(context, original_model, conditions): (rbac_model.target_project == context.tenant_id) | (rbac_model.target_project == '*')) conditions = expr.or_(tenant_allowed, *conditions) + conditions = expr.or_(original_model.tenant_id == context.tenant_id, + *conditions) return conditions diff --git a/neutron/tests/unit/extensions/test_external_net.py b/neutron/tests/unit/extensions/test_external_net.py index 465d9825d6d..edc76a2dbeb 100644 --- a/neutron/tests/unit/extensions/test_external_net.py +++ b/neutron/tests/unit/extensions/test_external_net.py @@ -142,7 +142,8 @@ def test_network_filter_hook_admin_context(self): def test_network_filter_hook_nonadmin_context(self): ctx = context.Context('edinson', 'cavani') model = models_v2.Network - txt = ("networkrbacs.action = :action_1 AND " + txt = ("networks.project_id = :project_id_1 OR " + "networkrbacs.action = :action_1 AND " "networkrbacs.target_project = :target_project_1 OR " "networkrbacs.target_project = :target_project_2") conditions = external_net_db._network_filter_hook(ctx, model, []) @@ -150,7 +151,8 @@ def test_network_filter_hook_nonadmin_context(self): # Try to concatenate conditions txt2 = (txt.replace('project_1', 'project_3'). replace('project_2', 'project_4'). - replace('action_1', 'action_2')) + replace('action_1', 'action_2'). 
+ replace('project_id_1', 'project_id_2')) conditions = external_net_db._network_filter_hook(ctx, model, conditions) self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2))