diff --git a/doc/source/admin/config-sriov.rst b/doc/source/admin/config-sriov.rst index 662081084ce..4d663fa784f 100644 --- a/doc/source/admin/config-sriov.rst +++ b/doc/source/admin/config-sriov.rst @@ -64,22 +64,14 @@ The following manufacturers are known to work: - QLogic - Broadcom -For information on **Mellanox SR-IOV Ethernet ConnectX cards**, see: +For information on **Mellanox SR-IOV Ethernet ConnectX cards**, see the +`Mellanox: How To Configure SR-IOV VFs on ConnectX-4 or newer `_. -- `Mellanox: How To Configure SR-IOV VFs on ConnectX-4 or newer `_. -- `Mellanox: How To Configure SR-IOV VFs on ConnectX-3/ConnectX-3 Pro `_. +For information on **QLogic SR-IOV Ethernet cards**, see the +`User's Guide OpenStack Deployment with SR-IOV Configuration `_. -For information on **QLogic SR-IOV Ethernet cards**, see: - -- `User's Guide OpenStack Deployment with SR-IOV Configuration `_. - -For information on **Broadcom NetXtreme-E Series Ethernet cards**, see the -`Broadcom NetXtreme-C/NetXtreme-E User Guide -`_. - -For information on **Broadcom NetXtreme-S Series Ethernet cards**, see the -`Broadcom NetXtreme-S Product Page -`_. +For information on **Broadcom NetXtreme Series Ethernet cards**, see the +`Broadcom NetXtreme Product Page `_. Using SR-IOV interfaces ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/neutron/agent/ovn/metadata/agent.py b/neutron/agent/ovn/metadata/agent.py index accdff80622..d40aa26f5af 100644 --- a/neutron/agent/ovn/metadata/agent.py +++ b/neutron/agent/ovn/metadata/agent.py @@ -315,6 +315,12 @@ def _get_ovn_bridge(self): "br-int instead.") return 'br-int' + def get_networks(self): + ports = self.sb_idl.get_ports_on_chassis(self.chassis) + return {(str(p.datapath.uuid), + ovn_utils.get_network_name_from_datapath(p.datapath)) + for p in self._vif_ports(ports)} + @_sync_lock def sync(self): """Agent sync. @@ -323,16 +329,26 @@ def sync(self): chassis are serving metadata. Also, it will tear down those namespaces which were serving metadata but are no longer needed. """ - metadata_namespaces = self.ensure_all_networks_provisioned() + + # first, clean up namespaces that should no longer deploy system_namespaces = tuple( ns.decode('utf-8') if isinstance(ns, bytes) else ns for ns in ip_lib.list_network_namespaces()) + nets = self.get_networks() + metadata_namespaces = [ + self._get_namespace_name(net[1]) + for net in nets + ] unused_namespaces = [ns for ns in system_namespaces if ns.startswith(NS_PREFIX) and ns not in metadata_namespaces] for ns in unused_namespaces: self.teardown_datapath(self._get_datapath_name(ns)) + # now that all obsolete namespaces are cleaned up, deploy required + # networks + self.ensure_all_networks_provisioned(nets) + @staticmethod def _get_veth_name(datapath): return ['{}{}{}'.format(n_const.TAP_DEVICE_PREFIX, @@ -424,8 +440,6 @@ def provision_datapath(self, datapath, net_name): and assign the IP addresses to the interface corresponding to the metadata port of the network. It will also remove existing IP addresses that are no longer needed. - - :return: The metadata namespace name of this datapath """ LOG.debug("Provisioning metadata for network %s", net_name) port = self.sb_idl.get_metadata_port_network(datapath) @@ -533,28 +547,13 @@ def provision_datapath(self, datapath, net_name): self.conf, bind_address=n_const.METADATA_V4_IP, network_id=net_name) - return namespace + def ensure_all_networks_provisioned(self, nets): + """Ensure that all requested datapaths are provisioned. 
- def ensure_all_networks_provisioned(self): - """Ensure that all datapaths are provisioned. - - This function will make sure that all datapaths with ports bound to - our chassis have its namespace, VETH pair and OVS port created and - metadata proxy is up and running. - - :return: A list with the namespaces that are currently serving - metadata + This function will make sure that requested datapaths have their + namespaces, VETH pair and OVS ports created and metadata proxies are up + and running. """ - # Retrieve all VIF ports in our Chassis - ports = self.sb_idl.get_ports_on_chassis(self.chassis) - nets = {(str(p.datapath.uuid), - ovn_utils.get_network_name_from_datapath(p.datapath)) - for p in self._vif_ports(ports)} - namespaces = [] # Make sure that all those datapaths are serving metadata for datapath, net_name in nets: - netns = self.provision_datapath(datapath, net_name) - if netns: - namespaces.append(netns) - - return namespaces + self.provision_datapath(datapath, net_name) diff --git a/neutron/services/logapi/drivers/ovn/driver.py b/neutron/services/logapi/drivers/ovn/driver.py index 080bcc475b8..98ed3f29ec9 100644 --- a/neutron/services/logapi/drivers/ovn/driver.py +++ b/neutron/services/logapi/drivers/ovn/driver.py @@ -302,6 +302,46 @@ def create_log_precommit(self, context, log_obj): if not self.network_logging_supported(self.ovn_nb): raise LoggingNotSupported() + def _unset_disabled_acls(self, context, log_obj, ovn_txn): + """Check if we need to disable any ACLs after an update. + + Will return True if there were more logs, and False if there was + nothing to check. + + :param context: current running context information + :param log_obj: a log_object which was updated + :returns: True if there were other logs enabled, otherwise False. + """ + if log_obj.enabled: + return False + + pgs = self._pgs_from_log_obj(context, log_obj) + other_logs = [log for log in self._get_logs(context) + if log.id != log_obj.id and log.enabled] + if not other_logs: + return False + + if log_obj.event == log_const.ALL_EVENT: + acls_to_check = pgs[0]["acls"].copy() + if not acls_to_check: + return True + for log in other_logs: + for acl in self._pgs_from_log_obj(context, log)[0]["acls"]: + if acl in acls_to_check: + acls_to_check.remove(acl) + if not acls_to_check: + return True + acls_to_remove = [{"name": pgs[0]["name"], "acls": acls_to_check}] + self._remove_acls_log(acls_to_remove, ovn_txn) + else: + all_events = set([log.event for log in other_logs + if (not log.resource_id or + log.resource_id == log_obj.resource_id)]) + if (log_const.ALL_EVENT not in all_events and + log_obj.event not in all_events): + self._remove_acls_log(pgs, ovn_txn) + return True + def update_log(self, context, log_obj): """Update a log_obj invocation. @@ -311,11 +351,13 @@ def update_log(self, context, log_obj): """ LOG.debug("Update_log %s", log_obj) - pgs = self._pgs_from_log_obj(context, log_obj) - actions_enabled = self._acl_actions_enabled(log_obj) with self.ovn_nb.transaction(check_error=True) as ovn_txn: - self._set_acls_log(pgs, ovn_txn, actions_enabled, - utils.ovn_name(log_obj.id)) + + if not self._unset_disabled_acls(context, log_obj, ovn_txn): + pgs = self._pgs_from_log_obj(context, log_obj) + actions_enabled = self._acl_actions_enabled(log_obj) + self._set_acls_log(pgs, ovn_txn, actions_enabled, + utils.ovn_name(log_obj.id)) def delete_log(self, context, log_obj): """Delete a log_obj invocation. 
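The ACL bookkeeping behind _unset_disabled_acls() above boils down to a set difference: when an ALL_EVENT log object is disabled, only the ACLs that no other still-enabled log object covers get their logging removed. A minimal standalone sketch of that arithmetic, assuming a made-up helper name and ACL ids (the driver itself pulls the real data from _pgs_from_log_obj() and _get_logs()):

    # Illustrative sketch only; acls_left_to_disable and the ACL ids are
    # stand-ins for the port-group data the OVN driver reads from the NB DB.
    def acls_left_to_disable(disabled_log_acls, other_enabled_logs_acls):
        """Return the ACL ids whose logging can actually be switched off."""
        remaining = set(disabled_log_acls)
        for acls in other_enabled_logs_acls:
            remaining -= set(acls)
            if not remaining:
                # Every ACL is still covered by some other enabled log
                # object, so nothing gets its logging removed.
                break
        return remaining

    # An ALL_EVENT log is disabled while a DROP log on the same SG stays on:
    print(acls_left_to_disable(['acl-accept', 'acl-drop'], [['acl-drop']]))
    # {'acl-accept'}  (only the accept ACL stops logging)

For events other than ALL_EVENT the driver takes the cheaper route of comparing event types instead of individual ACLs; both paths are exercised by the test_disable_logs case added further down.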
diff --git a/neutron/tests/functional/agent/l3/framework.py b/neutron/tests/functional/agent/l3/framework.py index af476b7085a..c773d2945f3 100644 --- a/neutron/tests/functional/agent/l3/framework.py +++ b/neutron/tests/functional/agent/l3/framework.py @@ -40,6 +40,7 @@ from neutron.conf.agent import common as agent_config from neutron.conf.agent.l3 import config as l3_config from neutron.conf import common as common_config +from neutron.tests import base as test_base from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional import base @@ -703,8 +704,8 @@ def fail_ha_router(self, router): ha_device = ip_lib.IPDevice(device_name, router.ha_namespace) ha_device.link.set_down() - @staticmethod - def wait_until_ha_router_has_state(router, expected_state): + @test_base.unstable_test("bug 1956958") + def wait_until_ha_router_has_state(self, router, expected_state): def router_has_expected_state(): state = router.ha_state diff --git a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py index b67680b707f..03f1d365d62 100644 --- a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py +++ b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py @@ -338,3 +338,59 @@ def test_events_one_sg(self): self._add_logs_then_remove( log_const.DROP_EVENT, log_const.ACCEPT_EVENT, sg=self.sg3, sgrs=self.sg3rs) + + def test_disable_logs(self): + # This test ensures that acls are correctly disabled when having + # multiple log objects. + + # Check there are no acls with their logging active + sgrs = self.sg1rs + self._check_sgrs(sgrs, is_enabled=False) + self._check_acl_log_drop(is_enabled=False) + + # Add accept log object + log_data1 = self._log_data(sg_id=self.sg1) + event1 = log_const.ACCEPT_EVENT + log_data1['log']['event'] = event1 + log_obj1 = self.log_plugin.create_log(self.ctxt, log_data1) + self._check_acl_log_drop(is_enabled=False) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + + # Add drop log object + log_data2 = self._log_data(sg_id=self.sg1) + event2 = log_const.DROP_EVENT + log_data2['log']['event'] = event2 + log_obj2 = self.log_plugin.create_log(self.ctxt, log_data2) + self._check_acl_log_drop(is_enabled=True) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + + # Disable drop log object and check it worked correctly + log_data2['log']['enabled'] = False + self.log_plugin.update_log(self.ctxt, log_obj2['id'], log_data2) + self._check_acl_log_drop(is_enabled=False) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + + # Enable drop log and create all log object + log_data2['log']['enabled'] = True + self.log_plugin.update_log(self.ctxt, log_obj2['id'], log_data2) + self._check_acl_log_drop(is_enabled=True) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + + log_data3 = self._log_data(sg_id=self.sg1) + log_data3['log']['event'] = log_const.ALL_EVENT + log_obj3 = self.log_plugin.create_log(self.ctxt, log_data3) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + self._check_acl_log_drop(is_enabled=True) + + # Disable all log object and check all acls are still enabled (because + # of the other objects) + log_data3['log']['enabled'] = False + self.log_plugin.update_log(self.ctxt, log_obj3['id'], log_data3) + self._check_sgrs(sgrs=sgrs, is_enabled=True) + self._check_acl_log_drop(is_enabled=True) + + # Disable accept log object and only drop traffic gets logged + log_data1['log']['enabled'] = False + 
self.log_plugin.update_log(self.ctxt, log_obj1['id'], log_data1) + self._check_sgrs(sgrs=sgrs, is_enabled=False) + self._check_acl_log_drop(is_enabled=True) diff --git a/neutron/tests/unit/agent/ovn/metadata/test_agent.py b/neutron/tests/unit/agent/ovn/metadata/test_agent.py index cf18de05ef3..79357c1ffa8 100644 --- a/neutron/tests/unit/agent/ovn/metadata/test_agent.py +++ b/neutron/tests/unit/agent/ovn/metadata/test_agent.py @@ -73,19 +73,29 @@ def setUp(self): self.agent.chassis = 'chassis' self.agent.ovn_bridge = 'br-int' + self.ports = [] + for i in range(0, 3): + self.ports.append(makePort(datapath=DatapathInfo(uuid=str(i), + external_ids={'name': 'neutron-%d' % i}))) + self.agent.sb_idl.get_ports_on_chassis.return_value = self.ports + def test_sync(self): + with mock.patch.object( self.agent, 'ensure_all_networks_provisioned') as enp,\ mock.patch.object( ip_lib, 'list_network_namespaces') as lnn,\ mock.patch.object( self.agent, 'teardown_datapath') as tdp: - enp.return_value = ['ovnmeta-1', 'ovnmeta-2'] lnn.return_value = ['ovnmeta-1', 'ovnmeta-2'] self.agent.sync() - enp.assert_called_once_with() + enp.assert_called_once_with({ + (p.datapath.uuid, p.datapath.uuid) + for p in self.ports + }) + lnn.assert_called_once_with() tdp.assert_not_called() @@ -97,18 +107,20 @@ def test_sync_teardown_namespace(self): ip_lib, 'list_network_namespaces') as lnn,\ mock.patch.object( self.agent, 'teardown_datapath') as tdp: - enp.return_value = ['ovnmeta-1', 'ovnmeta-2'] lnn.return_value = ['ovnmeta-1', 'ovnmeta-2', 'ovnmeta-3', 'ns1', 'ns2'] self.agent.sync() - enp.assert_called_once_with() + enp.assert_called_once_with({ + (p.datapath.uuid, p.datapath.uuid) + for p in self.ports + }) lnn.assert_called_once_with() tdp.assert_called_once_with('3') - def test_ensure_all_networks_provisioned(self): - """Test networks are provisioned. + def test_get_networks(self): + """Test which networks are provisioned. This test simulates that this chassis has the following ports: * datapath '0': 1 port @@ -117,44 +129,27 @@ def test_ensure_all_networks_provisioned(self): * datapath '3': 1 port with type 'external' * datapath '5': 1 port with type 'unknown' - It is expected that only datapaths '0', '1' and '2' are provisioned - once. + It is expected that only datapaths '0', '1' and '2' are scheduled for + provisioning. 
""" - ports = [] - for i in range(0, 3): - ports.append(makePort(datapath=DatapathInfo(uuid=str(i), - external_ids={'name': 'neutron-%d' % i}))) - ports.append(makePort(datapath=DatapathInfo(uuid='1', + self.ports.append(makePort(datapath=DatapathInfo(uuid='1', external_ids={'name': 'neutron-1'}))) - ports.append(makePort(datapath=DatapathInfo(uuid='3', + self.ports.append(makePort(datapath=DatapathInfo(uuid='3', external_ids={'name': 'neutron-3'}), type='external')) - ports.append(makePort(datapath=DatapathInfo(uuid='5', + self.ports.append(makePort(datapath=DatapathInfo(uuid='5', external_ids={'name': 'neutron-5'}), type='unknown')) - with mock.patch.object(self.agent, 'provision_datapath', - return_value=None) as pdp,\ - mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis', - return_value=ports): - self.agent.ensure_all_networks_provisioned() - - expected_calls = [mock.call(str(i), str(i)) for i in range(0, 4)] - self.assertEqual(sorted(expected_calls), - sorted(pdp.call_args_list)) + expected_networks = {(str(i), str(i)) for i in range(0, 4)} + self.assertEqual(expected_networks, self.agent.get_networks()) def test_update_datapath_provision(self): - ports = [] - for i in range(0, 3): - ports.append(makePort(datapath=DatapathInfo(uuid=str(i), - external_ids={'name': 'neutron-%d' % i}))) - ports.append(makePort(datapath=DatapathInfo(uuid='3', + self.ports.append(makePort(datapath=DatapathInfo(uuid='3', external_ids={'name': 'neutron-3'}), type='external')) with mock.patch.object(self.agent, 'provision_datapath', return_value=None) as pdp,\ - mock.patch.object(self.agent, 'teardown_datapath') as tdp,\ - mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis', - return_value=ports): + mock.patch.object(self.agent, 'teardown_datapath') as tdp: self.agent.update_datapath('1', 'a') self.agent.update_datapath('3', 'b') expected_calls = [mock.call('1', 'a'), mock.call('3', 'b')] @@ -162,16 +157,9 @@ def test_update_datapath_provision(self): tdp.assert_not_called() def test_update_datapath_teardown(self): - ports = [] - for i in range(0, 3): - ports.append(makePort(datapath=DatapathInfo(uuid=str(i), - external_ids={'name': 'neutron-%d' % i}))) - with mock.patch.object(self.agent, 'provision_datapath', return_value=None) as pdp,\ - mock.patch.object(self.agent, 'teardown_datapath') as tdp,\ - mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis', - return_value=ports): + mock.patch.object(self.agent, 'teardown_datapath') as tdp: self.agent.update_datapath('5', 'a') tdp.assert_called_once_with('5', 'a') pdp.assert_not_called() diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index f403d3983ce..302538f6f31 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -34,6 +34,7 @@ - ^rally-jobs/.*$ - ^zuul.d/(?!(project)).*\.yaml vars: + configure_swap_size: 8192 Q_BUILD_OVS_FROM_GIT: True MEMORY_TRACKER: True INSTALL_OVN: True @@ -130,7 +131,6 @@ vars: nslookup_target: 'opendev.org' enable_fips: True - configure_swap_size: 4096 devstack_localrc: ISCSI_CHAP_ALGORITHMS: SHA3-256,SHA256 Q_BUILD_OVS_FROM_GIT: true @@ -144,7 +144,6 @@ vars: nslookup_target: 'opendev.org' enable_fips: True - configure_swap_size: 4096 devstack_localrc: ISCSI_CHAP_ALGORITHMS: SHA3-256,SHA256 Q_BUILD_OVS_FROM_GIT: true