diff --git a/doc/source/ovn/migration.rst b/doc/source/ovn/migration.rst index 7fa733ce4ff..38d9bb65a52 100644 --- a/doc/source/ovn/migration.rst +++ b/doc/source/ovn/migration.rst @@ -155,6 +155,12 @@ Perform the following steps in the undercloud server that will be used as a NFS server to store the backup. Default: 192.168.24.1 + * BACKUP_MIGRATION_CTL_PLANE_CIDRS - Only used if CREATE_BACKUP is enabled. + A comma-separated string of control plane subnets in CIDR notation for the + controllers being backed up. The specified subnets will be used to enable + remote NFS client connections. + Default: 192.168.24.0/24 + .. warning:: Please note that VALIDATE_MIGRATION requires enough quota (2 diff --git a/neutron/agent/metadata/driver.py b/neutron/agent/metadata/driver.py index 0b23879354f..2c9af24d5e1 100644 --- a/neutron/agent/metadata/driver.py +++ b/neutron/agent/metadata/driver.py @@ -255,14 +255,6 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf, bind_address="0.0.0.0", network_id=None, router_id=None, bind_address_v6=None, bind_interface=None): - uuid = network_id or router_id - callback = cls._get_metadata_proxy_callback( - bind_address, port, conf, - network_id=network_id, router_id=router_id, - bind_address_v6=bind_address_v6, bind_interface=bind_interface) - pm = cls._get_metadata_proxy_process_manager(uuid, conf, - ns_name=ns_name, - callback=callback) if bind_interface is not None and bind_address_v6 is not None: # HAProxy cannot bind() until IPv6 Duplicate Address Detection # completes. 
We must wait until the address leaves its 'tentative' @@ -290,7 +282,18 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf, except Exception as exc: # do not re-raise a delete failure, just log LOG.info('Address deletion failure: %s', str(exc)) - return + + # Do not use the address or interface when DAD fails + bind_address_v6 = bind_interface = None + + uuid = network_id or router_id + callback = cls._get_metadata_proxy_callback( + bind_address, port, conf, + network_id=network_id, router_id=router_id, + bind_address_v6=bind_address_v6, bind_interface=bind_interface) + pm = cls._get_metadata_proxy_process_manager(uuid, conf, + ns_name=ns_name, + callback=callback) pm.enable() monitor.register(uuid, METADATA_SERVICE_NAME, pm) cls.monitors[router_id] = pm diff --git a/neutron/db/models/address_group.py b/neutron/db/models/address_group.py index 0e147bed06d..d810bb1de12 100644 --- a/neutron/db/models/address_group.py +++ b/neutron/db/models/address_group.py @@ -46,6 +46,6 @@ class AddressGroup(standard_attr.HasStandardAttributes, cascade='all, delete-orphan') rbac_entries = sa.orm.relationship(rbac_db_models.AddressGroupRBAC, backref='address_groups', - lazy='subquery', + lazy='joined', cascade='all, delete, delete-orphan') api_collections = [ag.ALIAS] diff --git a/neutron/db/models/address_scope.py b/neutron/db/models/address_scope.py index 18c80ce7d74..889814e64a6 100644 --- a/neutron/db/models/address_scope.py +++ b/neutron/db/models/address_scope.py @@ -37,5 +37,5 @@ class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasProject): rbac_entries = sa.orm.relationship(rbac_db_models.AddressScopeRBAC, backref='address_scopes', - lazy='subquery', + lazy='joined', cascade='all, delete, delete-orphan') diff --git a/neutron/db/models/securitygroup.py b/neutron/db/models/securitygroup.py index 75baec49bb2..7e80b0dc04b 100644 --- a/neutron/db/models/securitygroup.py +++ b/neutron/db/models/securitygroup.py @@ -34,7 +34,7 @@ class 
SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2, nullable=False) rbac_entries = sa.orm.relationship(rbac_db_models.SecurityGroupRBAC, backref='security_group', - lazy='subquery', + lazy='joined', cascade='all, delete, delete-orphan') api_collections = [sg.SECURITYGROUPS] collection_resource_map = {sg.SECURITYGROUPS: 'security_group'} diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py index 70acf2e86a2..9e001c7dbdd 100644 --- a/neutron/db/models_v2.py +++ b/neutron/db/models_v2.py @@ -228,7 +228,7 @@ class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2, # subnets don't have their own rbac_entries, they just inherit from # the network rbac entries rbac_entries = orm.relationship( - rbac_db_models.NetworkRBAC, lazy='subquery', uselist=True, + rbac_db_models.NetworkRBAC, lazy='joined', uselist=True, foreign_keys='Subnet.network_id', primaryjoin='Subnet.network_id==NetworkRBAC.object_id', viewonly=True) @@ -282,7 +282,7 @@ class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2, lazy='subquery') rbac_entries = sa.orm.relationship(rbac_db_models.SubnetPoolRBAC, backref='subnetpools', - lazy='subquery', + lazy='joined', cascade='all, delete, delete-orphan') api_collections = [subnetpool_def.COLLECTION_NAME] collection_resource_map = {subnetpool_def.COLLECTION_NAME: @@ -304,7 +304,7 @@ class Network(standard_attr.HasStandardAttributes, model_base.BASEV2, rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC, backref=orm.backref('network', load_on_pending=True), - lazy='subquery', + lazy='joined', cascade='all, delete, delete-orphan') availability_zone_hints = sa.Column(sa.String(255)) mtu = sa.Column(sa.Integer, nullable=False, diff --git a/neutron/db/qos/models.py b/neutron/db/qos/models.py index f0662d5a91f..73233e15f3f 100644 --- a/neutron/db/qos/models.py +++ b/neutron/db/qos/models.py @@ -29,7 +29,7 @@ class QosPolicy(standard_attr.HasStandardAttributes, model_base.BASEV2, __tablename__ = 
'qos_policies' name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC, - backref='qos_policy', lazy='subquery', + backref='qos_policy', lazy='joined', cascade='all, delete, delete-orphan') api_collections = ['policies'] collection_resource_map = {'policies': 'policy'} diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py index 2c7482b16a6..c63fb303775 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -1028,6 +1028,10 @@ def sync_networks_ports_and_dhcp_opts(self, ctx): except RuntimeError: LOG.warning("Create network in OVN NB failed for " "network %s", network['id']) + except n_exc.IpAddressGenerationFailure: + LOG.warning("No more IP addresses available during " + "implicit port creation while creating " + "network %s", network['id']) self._sync_metadata_ports(ctx, db_ports) diff --git a/neutron/tests/unit/agent/metadata/test_driver.py b/neutron/tests/unit/agent/metadata/test_driver.py index 7d4df6750cb..e01300f7b83 100644 --- a/neutron/tests/unit/agent/metadata/test_driver.py +++ b/neutron/tests/unit/agent/metadata/test_driver.py @@ -176,9 +176,12 @@ def _test_spawn_metadata_proxy(self, dad_failed=False): "%s.conf" % router_id) mock_open = self.useFixture( lib_fixtures.OpenFixture(cfg_file)).mock_open + bind_v6_line = 'bind %s:%s interface %s' % ( + self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if') if dad_failed: mock_wait.side_effect = ip_lib.DADFailed( - address=self.METADATA_DEFAULT_IP, reason='DAD failed') + address=self.METADATA_DEFAULT_IPV6, reason='DAD failed') + bind_v6_line = '' else: mock_wait.return_value = True agent.metadata_driver.spawn_monitored_metadata_proxy( @@ -197,8 +200,6 @@ def _test_spawn_metadata_proxy(self, dad_failed=False): log_tag = ("haproxy-" + 
metadata_driver.METADATA_SERVICE_NAME + "-" + router_id) - bind_v6_line = 'bind %s:%s interface %s' % ( - self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if') cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % { 'user': self.EUNAME, 'group': self.EGNAME, @@ -214,26 +215,26 @@ def _test_spawn_metadata_proxy(self, dad_failed=False): 'bind_v6_line': bind_v6_line} if dad_failed: - agent.process_monitor.register.assert_not_called() mock_del.assert_called_once_with(self.METADATA_DEFAULT_IPV6, 'fake-if', namespace=router_ns) else: - mock_open.assert_has_calls([ - mock.call(cfg_file, 'w'), - mock.call().write(cfg_contents)], any_order=True) - - ip_mock.assert_has_calls([ - mock.call(namespace=router_ns), - mock.call().netns.execute(netns_execute_args, - addl_env=None, run_as_root=True) - ]) - - agent.process_monitor.register.assert_called_once_with( - router_id, metadata_driver.METADATA_SERVICE_NAME, - mock.ANY) mock_del.assert_not_called() + mock_open.assert_has_calls([ + mock.call(cfg_file, 'w'), + mock.call().write(cfg_contents)], any_order=True) + + ip_mock.assert_has_calls([ + mock.call(namespace=router_ns), + mock.call().netns.execute(netns_execute_args, addl_env=None, + run_as_root=True) + ]) + + agent.process_monitor.register.assert_called_once_with( + router_id, metadata_driver.METADATA_SERVICE_NAME, + mock.ANY) + def test_spawn_metadata_proxy(self): self._test_spawn_metadata_proxy() diff --git a/neutron/tests/unit/objects/test_address_group.py b/neutron/tests/unit/objects/test_address_group.py index c932f20577d..a3b5f89e178 100644 --- a/neutron/tests/unit/objects/test_address_group.py +++ b/neutron/tests/unit/objects/test_address_group.py @@ -58,6 +58,7 @@ class AddressGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, testlib_api.SqlTestCase): _test_class = address_group.AddressGroupRBAC + _parent_class = address_group.AddressGroup def setUp(self): super(AddressGroupRBACDbObjectTestCase, self).setUp() diff --git 
a/neutron/tests/unit/objects/test_address_scope.py b/neutron/tests/unit/objects/test_address_scope.py index 77d88f408a8..d50825cde99 100644 --- a/neutron/tests/unit/objects/test_address_scope.py +++ b/neutron/tests/unit/objects/test_address_scope.py @@ -36,6 +36,7 @@ class AddressScopeRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, testlib_api.SqlTestCase): _test_class = address_scope.AddressScopeRBAC + _parent_class = address_scope.AddressScope def setUp(self): super(AddressScopeRBACDbObjectTestCase, self).setUp() diff --git a/neutron/tests/unit/objects/test_network.py b/neutron/tests/unit/objects/test_network.py index bfba77447f2..57204dce3c0 100644 --- a/neutron/tests/unit/objects/test_network.py +++ b/neutron/tests/unit/objects/test_network.py @@ -12,6 +12,8 @@ from unittest import mock +from neutron_lib.api.definitions import availability_zone as az_def + from neutron.db import rbac_db_models from neutron.objects import base as obj_base from neutron.objects import network @@ -27,6 +29,7 @@ class NetworkRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, testlib_api.SqlTestCase): _test_class = network.NetworkRBAC + _parent_class = network.Network def setUp(self): self._mock_get_valid_actions = mock.patch.object( @@ -50,6 +53,13 @@ def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self): network_rbac_obj['versioned_object.data']) self.assertNotIn('id', network_rbac_obj['versioned_object.data']) + def _create_random_parent_object(self): + objclass_fields = self.get_random_db_fields(self._parent_class) + objclass_fields.pop(az_def.AZ_HINTS) + _obj = self._parent_class(self.context, **objclass_fields) + _obj.create() + return _obj + class NetworkRBACIfaceOjectTestCase(test_rbac.TestRBACObjectMixin, obj_test_base.BaseObjectIfaceTestCase): diff --git a/neutron/tests/unit/objects/test_rbac.py b/neutron/tests/unit/objects/test_rbac.py index 8775a4046bc..02cec9cd0fa 100644 --- a/neutron/tests/unit/objects/test_rbac.py +++ 
b/neutron/tests/unit/objects/test_rbac.py @@ -9,10 +9,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import random +import random from unittest import mock +from neutron_lib import context + +from neutron.db import rbac_db_models from neutron.objects import address_group from neutron.objects import address_scope from neutron.objects import network @@ -26,6 +29,9 @@ class TestRBACObjectMixin(object): + _test_class = None + _parent_class = None + def get_random_object_fields(self, obj_cls=None): fields = (super(TestRBACObjectMixin, self). get_random_object_fields(obj_cls)) @@ -34,6 +40,35 @@ def get_random_object_fields(self, obj_cls=None): fields['action'] = rnd_actions[idx] return fields + def _create_random_parent_object(self): + objclass_fields = self.get_random_db_fields(self._parent_class) + _obj = self._parent_class(self.context, **objclass_fields) + _obj.create() + return _obj + + def test_rbac_shared_on_parent_object(self): + if not self._test_class or not self._parent_class: + self.skipTest('Mixin class, skipped test') + project_id = self.objs[0].project_id + _obj_shared = self._create_random_parent_object() + # Create a second object that won't be shared and thus won't be + # retrieved by the non-admin users. 
+ self._create_random_parent_object() + for idx in range(3): + project = 'project_%s' % idx + rbac = self._test_class( + self.context, project_id=project_id, target_project=project, + action=rbac_db_models.ACCESS_SHARED, + object_id=_obj_shared.id) + rbac.create() + + for idx in range(3): + project = 'project_%s' % idx + ctx_no_admin = context.Context(user_id='user', tenant_id=project, + is_admin=False) + objects = self._parent_class.get_objects(ctx_no_admin) + self.assertEqual([_obj_shared.id], [_obj.id for _obj in objects]) + class RBACBaseObjectTestCase(neutron_test_base.BaseTestCase): diff --git a/neutron/tests/unit/objects/test_securitygroup.py b/neutron/tests/unit/objects/test_securitygroup.py index 192cf86c75a..53a7901d3ac 100644 --- a/neutron/tests/unit/objects/test_securitygroup.py +++ b/neutron/tests/unit/objects/test_securitygroup.py @@ -27,6 +27,7 @@ class SecurityGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, testlib_api.SqlTestCase): _test_class = securitygroup.SecurityGroupRBAC + _parent_class = securitygroup.SecurityGroup def setUp(self): super(SecurityGroupRBACDbObjectTestCase, self).setUp() diff --git a/neutron/tests/unit/objects/test_subnetpool.py b/neutron/tests/unit/objects/test_subnetpool.py index 57834dc756e..8f3f0fa6ac5 100644 --- a/neutron/tests/unit/objects/test_subnetpool.py +++ b/neutron/tests/unit/objects/test_subnetpool.py @@ -195,6 +195,7 @@ class SubnetPoolRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, SubnetPoolTestMixin): _test_class = subnetpool.SubnetPoolRBAC + _parent_class = subnetpool.SubnetPool def setUp(self): super(SubnetPoolRBACDbObjectTestCase, self).setUp() diff --git a/tools/ovn_migration/tripleo_environment/ovn_migration.sh b/tools/ovn_migration/tripleo_environment/ovn_migration.sh index 271783e74a5..14c6ce7d0c2 100644 --- a/tools/ovn_migration/tripleo_environment/ovn_migration.sh +++ b/tools/ovn_migration/tripleo_environment/ovn_migration.sh @@ -42,7 +42,8 @@ LANG=C : ${VALIDATE_MIGRATION:=False} : 
${DHCP_RENEWAL_TIME:=30} : ${CREATE_BACKUP:=True} -: ${BACKUP_MIGRATION_IP:=192.168.24.1} # TODO: Document this new var +: ${BACKUP_MIGRATION_IP:=192.168.24.1} +: ${BACKUP_MIGRATION_CTL_PLANE_CIDRS:=192.168.24.0/24} check_for_necessary_files() { @@ -328,6 +329,7 @@ start_migration() { -e overcloudrc=$OVERCLOUDRC_FILE \ -e stackrc=$STACKRC_FILE \ -e backup_migration_ip=$BACKUP_MIGRATION_IP \ + -e backup_migration_ctl_plane_cidrs=$BACKUP_MIGRATION_CTL_PLANE_CIDRS \ -e create_backup=$CREATE_BACKUP \ -e ansible_inventory=$inventory_file \ -e validate_migration=$VALIDATE_MIGRATION $* diff --git a/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml b/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml index 6ed510316ae..d018183fb5c 100644 --- a/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml +++ b/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml @@ -36,6 +36,7 @@ --setup-nfs \ --extra-vars '{ "tripleo_backup_and_restore_server": {{ backup_migration_ip }}, + "tripleo_backup_and_restore_clients_nets": {{ backup_migration_ctl_plane_cidrs.split(',') }}, "nfs_server_group_name": {{ revert_preparation_server_name }} }'