From 1a66962ee338e21f9523025f06346d35ddca334d Mon Sep 17 00:00:00 2001
From: Rodolfo Alonso Hernandez
Date: Sun, 28 May 2023 17:28:03 +0200
Subject: [PATCH 1/4] Change RBAC relationship loading method to "joined"

This patch changes all RBAC relationship loading methods to "joined".
This change ensures that the associated RBAC records are loaded along
with the parent resource.

The rationale for this change is to gain control over the SQL query
executed; with "subquery" loading, the subquery cannot be directly
managed by Neutron.

RBAC rules are usually created from a single project, typically the
administrator project; that means all RBAC rules will belong to it.
Before this change, the SQL subquery performed to retrieve the RBAC
entries was this (from a network query):

  SELECT networks.id AS networks_id
  FROM networks LEFT OUTER JOIN networkrbacs
    ON networks.id = networkrbacs.object_id
  WHERE networks.project_id = 'bd133e2c499c4bf8aeb16206e31c3c20'
    OR networkrbacs.action = 'access_as_external'
      AND networkrbacs.target_project = 'bd133e2c499c4bf8aeb16206e31c3c20'
    OR networkrbacs.target_project = '*'
    OR networks.project_id = 'bd133e2c499c4bf8aeb16206e31c3c20'
    OR networkrbacs.action IN ('access_as_shared', 'access_as_readonly')
      AND (networkrbacs.target_project = 'bd133e2c499c4bf8aeb16206e31c3c20'
           OR networkrbacs.target_project = '*');

This SQL result has a very low cardinality; that means there are many
duplicated rows. For example, with 10 external networks, 1000 projects
and 2500 RBAC rules, this query returns 1.4 million rows. If instead a
"GROUP BY resource_id" (in this case network_id) clause is added, the
number of rows is reduced to 10 (assuming this project has one RBAC
rule per network).

In order to introduce this "GROUP BY" clause, this patch changes the
loading method. The clause itself is added in a neutron-lib patch [1].

This change by itself does not improve the query performance; the
neutron-lib patch is needed too. Although this patch does not modify
the SQL query results, the tests added here prove that the neutron-lib
patch does not introduce any regression.
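The cardinality problem can be reproduced with a minimal, self-contained
SQLAlchemy sketch; the two-table schema below is a stand-in that mirrors
the query above, not Neutron's actual models:

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    networks = sa.Table(
        'networks', meta,
        sa.Column('id', sa.String(36), primary_key=True))
    networkrbacs = sa.Table(
        'networkrbacs', meta,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('object_id', sa.String(36), sa.ForeignKey('networks.id')),
        sa.Column('action', sa.String(32)),
        sa.Column('target_project', sa.String(36)))
    meta.create_all(engine)

    with engine.begin() as conn:
        conn.execute(networks.insert(), [{'id': 'net-1'}])
        # Three RBAC rules sharing one network, as one project would create.
        conn.execute(networkrbacs.insert(), [
            {'object_id': 'net-1', 'action': 'access_as_shared',
             'target_project': 'project_%d' % i} for i in range(3)])

        joined = networks.outerjoin(
            networkrbacs, networks.c.id == networkrbacs.c.object_id)
        # Without GROUP BY: one row per (network, RBAC rule) pair.
        print(len(conn.execute(
            sa.select(networks.c.id).select_from(joined)).all()))  # 3
        # With GROUP BY networks.id: one row per network.
        print(len(conn.execute(
            sa.select(networks.c.id).select_from(joined)
            .group_by(networks.c.id)).all()))  # 1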
[1]https://review.opendev.org/c/openstack/neutron-lib/+/884878

Closes-Bug: #1918145
Change-Id: Ic6001bd5a57493b8befdf81a41eb0bd1c8022df3
(cherry picked from commit e9da29d16c474822c015996cf34e40005419146a)
---
 neutron/db/models/address_group.py            |  2 +-
 neutron/db/models/address_scope.py            |  2 +-
 neutron/db/models/securitygroup.py            |  2 +-
 neutron/db/models_v2.py                       |  6 +--
 neutron/db/qos/models.py                      |  2 +-
 .../tests/unit/objects/test_address_group.py  |  1 +
 .../tests/unit/objects/test_address_scope.py  |  1 +
 neutron/tests/unit/objects/test_network.py    | 10 +++++
 neutron/tests/unit/objects/test_rbac.py       | 37 ++++++++++++++++++-
 .../tests/unit/objects/test_securitygroup.py  |  1 +
 neutron/tests/unit/objects/test_subnetpool.py |  1 +
 11 files changed, 57 insertions(+), 8 deletions(-)

diff --git a/neutron/db/models/address_group.py b/neutron/db/models/address_group.py
index 0e147bed06d..d810bb1de12 100644
--- a/neutron/db/models/address_group.py
+++ b/neutron/db/models/address_group.py
@@ -46,6 +46,6 @@ class AddressGroup(standard_attr.HasStandardAttributes,
                                        cascade='all, delete-orphan')
     rbac_entries = sa.orm.relationship(rbac_db_models.AddressGroupRBAC,
                                        backref='address_groups',
-                                       lazy='subquery',
+                                       lazy='joined',
                                        cascade='all, delete, delete-orphan')
     api_collections = [ag.ALIAS]
diff --git a/neutron/db/models/address_scope.py b/neutron/db/models/address_scope.py
index 18c80ce7d74..889814e64a6 100644
--- a/neutron/db/models/address_scope.py
+++ b/neutron/db/models/address_scope.py
@@ -37,5 +37,5 @@ class AddressScope(model_base.BASEV2, model_base.HasId,
                    model_base.HasProject):
     rbac_entries = sa.orm.relationship(rbac_db_models.AddressScopeRBAC,
                                        backref='address_scopes',
-                                       lazy='subquery',
+                                       lazy='joined',
                                        cascade='all, delete, delete-orphan')
diff --git a/neutron/db/models/securitygroup.py b/neutron/db/models/securitygroup.py
index 75baec49bb2..7e80b0dc04b 100644
--- a/neutron/db/models/securitygroup.py
+++ b/neutron/db/models/securitygroup.py
@@ -34,7 +34,7 @@ class SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2,
                          nullable=False)
     rbac_entries = sa.orm.relationship(rbac_db_models.SecurityGroupRBAC,
                                        backref='security_group',
-                                       lazy='subquery',
+                                       lazy='joined',
                                        cascade='all, delete, delete-orphan')
     api_collections = [sg.SECURITYGROUPS]
     collection_resource_map = {sg.SECURITYGROUPS: 'security_group'}
diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py
index 70acf2e86a2..9e001c7dbdd 100644
--- a/neutron/db/models_v2.py
+++ b/neutron/db/models_v2.py
@@ -228,7 +228,7 @@ class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2,
     # subnets don't have their own rbac_entries, they just inherit from
     # the network rbac entries
     rbac_entries = orm.relationship(
-        rbac_db_models.NetworkRBAC, lazy='subquery', uselist=True,
+        rbac_db_models.NetworkRBAC, lazy='joined', uselist=True,
         foreign_keys='Subnet.network_id',
         primaryjoin='Subnet.network_id==NetworkRBAC.object_id',
         viewonly=True)
@@ -282,7 +282,7 @@ class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2,
                                     lazy='subquery')
     rbac_entries = sa.orm.relationship(rbac_db_models.SubnetPoolRBAC,
                                        backref='subnetpools',
-                                       lazy='subquery',
+                                       lazy='joined',
                                        cascade='all, delete, delete-orphan')
     api_collections = [subnetpool_def.COLLECTION_NAME]
     collection_resource_map = {subnetpool_def.COLLECTION_NAME:
@@ -304,7 +304,7 @@ class Network(standard_attr.HasStandardAttributes, model_base.BASEV2,
     rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC,
                                     backref=orm.backref('network',
                                                         load_on_pending=True),
-                                    lazy='subquery',
+                                    lazy='joined',
                                     cascade='all, delete, delete-orphan')
     availability_zone_hints = sa.Column(sa.String(255))
     mtu = sa.Column(sa.Integer, nullable=False,
diff --git a/neutron/db/qos/models.py b/neutron/db/qos/models.py
index f0662d5a91f..73233e15f3f 100644
--- a/neutron/db/qos/models.py
+++ b/neutron/db/qos/models.py
@@ -29,7 +29,7 @@ class QosPolicy(standard_attr.HasStandardAttributes, model_base.BASEV2,
     __tablename__ = 'qos_policies'
     name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
     rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC,
-                                       backref='qos_policy', lazy='subquery',
+                                       backref='qos_policy', lazy='joined',
                                        cascade='all, delete, delete-orphan')
     api_collections = ['policies']
     collection_resource_map = {'policies': 'policy'}
diff --git a/neutron/tests/unit/objects/test_address_group.py b/neutron/tests/unit/objects/test_address_group.py
index c932f20577d..a3b5f89e178 100644
--- a/neutron/tests/unit/objects/test_address_group.py
+++ b/neutron/tests/unit/objects/test_address_group.py
@@ -58,6 +58,7 @@ class AddressGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                        testlib_api.SqlTestCase):

     _test_class = address_group.AddressGroupRBAC
+    _parent_class = address_group.AddressGroup

     def setUp(self):
         super(AddressGroupRBACDbObjectTestCase, self).setUp()
diff --git a/neutron/tests/unit/objects/test_address_scope.py b/neutron/tests/unit/objects/test_address_scope.py
index 77d88f408a8..d50825cde99 100644
--- a/neutron/tests/unit/objects/test_address_scope.py
+++ b/neutron/tests/unit/objects/test_address_scope.py
@@ -36,6 +36,7 @@ class AddressScopeRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                        testlib_api.SqlTestCase):

     _test_class = address_scope.AddressScopeRBAC
+    _parent_class = address_scope.AddressScope

     def setUp(self):
         super(AddressScopeRBACDbObjectTestCase, self).setUp()
diff --git a/neutron/tests/unit/objects/test_network.py b/neutron/tests/unit/objects/test_network.py
index bfba77447f2..57204dce3c0 100644
--- a/neutron/tests/unit/objects/test_network.py
+++ b/neutron/tests/unit/objects/test_network.py
@@ -12,6 +12,8 @@

 from unittest import mock

+from neutron_lib.api.definitions import availability_zone as az_def
+
 from neutron.db import rbac_db_models
 from neutron.objects import base as obj_base
 from neutron.objects import network
@@ -27,6 +29,7 @@ class NetworkRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                   testlib_api.SqlTestCase):

     _test_class = network.NetworkRBAC
+    _parent_class = network.Network

     def setUp(self):
         self._mock_get_valid_actions = mock.patch.object(
@@ -50,6 +53,13 @@ def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self):
                          network_rbac_obj['versioned_object.data'])
         self.assertNotIn('id', network_rbac_obj['versioned_object.data'])

+    def _create_random_parent_object(self):
+        objclass_fields = self.get_random_db_fields(self._parent_class)
+        objclass_fields.pop(az_def.AZ_HINTS)
+        _obj = self._parent_class(self.context, **objclass_fields)
+        _obj.create()
+        return _obj
+

 class NetworkRBACIfaceOjectTestCase(test_rbac.TestRBACObjectMixin,
                                     obj_test_base.BaseObjectIfaceTestCase):
diff --git a/neutron/tests/unit/objects/test_rbac.py b/neutron/tests/unit/objects/test_rbac.py
index 8775a4046bc..02cec9cd0fa 100644
--- a/neutron/tests/unit/objects/test_rbac.py
+++ b/neutron/tests/unit/objects/test_rbac.py
@@ -9,10 +9,13 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-import random
+import random
 from unittest import mock

+from neutron_lib import context
+
+from neutron.db import rbac_db_models
 from neutron.objects import address_group
 from neutron.objects import address_scope
 from neutron.objects import network
@@ -26,6 +29,9 @@

 class TestRBACObjectMixin(object):

+    _test_class = None
+    _parent_class = None
+
     def get_random_object_fields(self, obj_cls=None):
         fields = (super(TestRBACObjectMixin, self).
                   get_random_object_fields(obj_cls))
@@ -34,6 +40,35 @@ def get_random_object_fields(self, obj_cls=None):
         fields['action'] = rnd_actions[idx]
         return fields

+    def _create_random_parent_object(self):
+        objclass_fields = self.get_random_db_fields(self._parent_class)
+        _obj = self._parent_class(self.context, **objclass_fields)
+        _obj.create()
+        return _obj
+
+    def test_rbac_shared_on_parent_object(self):
+        if not self._test_class or not self._parent_class:
+            self.skipTest('Mixin class, skipped test')
+        project_id = self.objs[0].project_id
+        _obj_shared = self._create_random_parent_object()
+        # Create a second object that won't be shared and thus won't be
+        # retrieved by the non-admin users.
+        self._create_random_parent_object()
+        for idx in range(3):
+            project = 'project_%s' % idx
+            rbac = self._test_class(
+                self.context, project_id=project_id, target_project=project,
+                action=rbac_db_models.ACCESS_SHARED,
+                object_id=_obj_shared.id)
+            rbac.create()
+
+        for idx in range(3):
+            project = 'project_%s' % idx
+            ctx_no_admin = context.Context(user_id='user', tenant_id=project,
+                                           is_admin=False)
+            objects = self._parent_class.get_objects(ctx_no_admin)
+            self.assertEqual([_obj_shared.id], [_obj.id for _obj in objects])
+

 class RBACBaseObjectTestCase(neutron_test_base.BaseTestCase):
diff --git a/neutron/tests/unit/objects/test_securitygroup.py b/neutron/tests/unit/objects/test_securitygroup.py
index 192cf86c75a..53a7901d3ac 100644
--- a/neutron/tests/unit/objects/test_securitygroup.py
+++ b/neutron/tests/unit/objects/test_securitygroup.py
@@ -27,6 +27,7 @@ class SecurityGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                         testlib_api.SqlTestCase):

     _test_class = securitygroup.SecurityGroupRBAC
+    _parent_class = securitygroup.SecurityGroup

     def setUp(self):
         super(SecurityGroupRBACDbObjectTestCase, self).setUp()
diff --git a/neutron/tests/unit/objects/test_subnetpool.py b/neutron/tests/unit/objects/test_subnetpool.py
index 57834dc756e..8f3f0fa6ac5 100644
--- a/neutron/tests/unit/objects/test_subnetpool.py
+++ b/neutron/tests/unit/objects/test_subnetpool.py
@@ -195,6 +195,7 @@ class SubnetPoolRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                      SubnetPoolTestMixin):

     _test_class = subnetpool.SubnetPoolRBAC
+    _parent_class = subnetpool.SubnetPool

     def setUp(self):
         super(SubnetPoolRBACDbObjectTestCase, self).setUp()

From b37a2f80ee11715643929e1af809aa9edd814ed0 Mon Sep 17 00:00:00 2001
From: Brian Haley
Date: Mon, 15 May 2023 12:29:42 -0400
Subject: [PATCH 2/4] Start metadata proxy even if IPv6 DAD fails

A recent change suppressed the IPv6 DAD failure and removed the address
when multiple DHCP agents were configured on the same network:

https://review.opendev.org/c/openstack/neutron/+/880957

But it also changed the behavior to not enable IPv4 metadata in this
case. Restore the old behavior by not returning early in the DAD
failure case.

The callback that builds the config file was moved to run after the
address is bound, to make the two steps more obvious.
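The new control flow can be summarized with a short, self-contained
sketch (the helper names here are stand-ins, not the real agent API):
on DAD failure the IPv6 bind parameters are cleared and the proxy is
still spawned on IPv4, instead of the old early return that skipped
IPv4 metadata entirely:

    class DADFailed(Exception):
        """Stand-in for neutron.agent.linux.ip_lib.DADFailed."""

    def wait_for_dad(address, interface):
        # Simulate an address stuck in the 'tentative' state.
        raise DADFailed('%s stayed tentative on %s' % (address, interface))

    def spawn_proxy(bind_address, bind_address_v6, bind_interface):
        if bind_interface is not None and bind_address_v6 is not None:
            try:
                wait_for_dad(bind_address_v6, bind_interface)
            except DADFailed as exc:
                print('IPv6 DAD failed: %s' % exc)
                # Do not use the address or interface when DAD fails;
                # fall through and serve metadata over IPv4 only.
                bind_address_v6 = bind_interface = None
        # The config callback is now built after the bind decision, so
        # the generated HAProxy config omits the IPv6 bind line here.
        return {'v4': bind_address, 'v6': bind_address_v6}

    print(spawn_proxy('0.0.0.0', 'fe80::1', 'fake-if'))
    # {'v4': '0.0.0.0', 'v6': None}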
Conflicts:
    neutron/tests/unit/agent/metadata/test_driver.py

Related-bug: #1953165

Change-Id: I8436c6c9da9a2533ca27ff7312f5b2c7ea41e94f
(cherry picked from commit 846003c4379124de6ffb3628ef1feb12a62a9cfa)
(cherry picked from commit e7f85abae6a46a115582b80d1909e3565d859e9b)
(cherry picked from commit 1a711f399abebff6572551ef4e3f7b92397caab5)
---
 neutron/agent/metadata/driver.py              | 21 ++++++-----
 .../tests/unit/agent/metadata/test_driver.py  | 35 ++++++++++---------
 2 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/neutron/agent/metadata/driver.py b/neutron/agent/metadata/driver.py
index 0b23879354f..2c9af24d5e1 100644
--- a/neutron/agent/metadata/driver.py
+++ b/neutron/agent/metadata/driver.py
@@ -255,14 +255,6 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
                                        bind_address="0.0.0.0", network_id=None,
                                        router_id=None, bind_address_v6=None,
                                        bind_interface=None):
-        uuid = network_id or router_id
-        callback = cls._get_metadata_proxy_callback(
-            bind_address, port, conf,
-            network_id=network_id, router_id=router_id,
-            bind_address_v6=bind_address_v6, bind_interface=bind_interface)
-        pm = cls._get_metadata_proxy_process_manager(uuid, conf,
-                                                     ns_name=ns_name,
-                                                     callback=callback)
         if bind_interface is not None and bind_address_v6 is not None:
             # HAProxy cannot bind() until IPv6 Duplicate Address Detection
             # completes. We must wait until the address leaves its 'tentative'
@@ -290,7 +282,18 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
             except Exception as exc:
                 # do not re-raise a delete failure, just log
                 LOG.info('Address deletion failure: %s', str(exc))
-                return
+
+                # Do not use the address or interface when DAD fails
+                bind_address_v6 = bind_interface = None
+
+        uuid = network_id or router_id
+        callback = cls._get_metadata_proxy_callback(
+            bind_address, port, conf,
+            network_id=network_id, router_id=router_id,
+            bind_address_v6=bind_address_v6, bind_interface=bind_interface)
+        pm = cls._get_metadata_proxy_process_manager(uuid, conf,
+                                                     ns_name=ns_name,
+                                                     callback=callback)
         pm.enable()
         monitor.register(uuid, METADATA_SERVICE_NAME, pm)
         cls.monitors[router_id] = pm
diff --git a/neutron/tests/unit/agent/metadata/test_driver.py b/neutron/tests/unit/agent/metadata/test_driver.py
index 7d4df6750cb..e01300f7b83 100644
--- a/neutron/tests/unit/agent/metadata/test_driver.py
+++ b/neutron/tests/unit/agent/metadata/test_driver.py
@@ -176,9 +176,12 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):
                                 "%s.conf" % router_id)
         mock_open = self.useFixture(
             lib_fixtures.OpenFixture(cfg_file)).mock_open
+        bind_v6_line = 'bind %s:%s interface %s' % (
+            self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if')
         if dad_failed:
             mock_wait.side_effect = ip_lib.DADFailed(
-                address=self.METADATA_DEFAULT_IP, reason='DAD failed')
+                address=self.METADATA_DEFAULT_IPV6, reason='DAD failed')
+            bind_v6_line = ''
         else:
             mock_wait.return_value = True
         agent.metadata_driver.spawn_monitored_metadata_proxy(
@@ -197,8 +200,6 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):
         log_tag = ("haproxy-" + metadata_driver.METADATA_SERVICE_NAME +
                    "-" + router_id)
-        bind_v6_line = 'bind %s:%s interface %s' % (
-            self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if')
         cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
             'user': self.EUNAME,
             'group': self.EGNAME,
@@ -214,26 +215,26 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):
             'bind_v6_line': bind_v6_line}

         if dad_failed:
-            agent.process_monitor.register.assert_not_called()
             mock_del.assert_called_once_with(self.METADATA_DEFAULT_IPV6,
                                              'fake-if',
                                              namespace=router_ns)
         else:
-            mock_open.assert_has_calls([
-                mock.call(cfg_file, 'w'),
-                mock.call().write(cfg_contents)], any_order=True)
-
-            ip_mock.assert_has_calls([
-                mock.call(namespace=router_ns),
-                mock.call().netns.execute(netns_execute_args,
-                                          addl_env=None, run_as_root=True)
-            ])
-
-            agent.process_monitor.register.assert_called_once_with(
-                router_id, metadata_driver.METADATA_SERVICE_NAME,
-                mock.ANY)
             mock_del.assert_not_called()

+        mock_open.assert_has_calls([
+            mock.call(cfg_file, 'w'),
+            mock.call().write(cfg_contents)], any_order=True)
+
+        ip_mock.assert_has_calls([
+            mock.call(namespace=router_ns),
+            mock.call().netns.execute(netns_execute_args, addl_env=None,
+                                      run_as_root=True)
+        ])
+
+        agent.process_monitor.register.assert_called_once_with(
+            router_id, metadata_driver.METADATA_SERVICE_NAME,
+            mock.ANY)
+
     def test_spawn_metadata_proxy(self):
         self._test_spawn_metadata_proxy()

From b6145ee13ee0ed08b13181ec2a64b0d362559a8b Mon Sep 17 00:00:00 2001
From: Miro Tomaska
Date: Tue, 18 Apr 2023 13:21:53 -0500
Subject: [PATCH 3/4] Handle no more IP addresses available during a network
 sync

In some corner cases, such as when db_sync is performed after an OVS to
OVN migration, implicit metadata port creation on a network with a
depleted IP pool can raise an IP allocation error. Catch and log this
error so that the db_sync tool can finish syncing.

Change-Id: Ibb32ec5492c4fe00b9dac510f7e69160982992dd
(cherry picked from commit a86e300a0b3c9b00974041935861ffb9279a6960)
---
 .../plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
index 2c7482b16a6..c63fb303775 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
@@ -1028,6 +1028,10 @@ def sync_networks_ports_and_dhcp_opts(self, ctx):
             except RuntimeError:
                 LOG.warning("Create network in OVN NB failed for "
                             "network %s", network['id'])
+            except n_exc.IpAddressGenerationFailure:
+                LOG.warning("No more IP addresses available during "
+                            "implicit port creation while creating "
+                            "network %s", network['id'])

         self._sync_metadata_ports(ctx, db_ports)

From 70a7188209b78cf5d966452065921942ff442874 Mon Sep 17 00:00:00 2001
From: Miro Tomaska
Date: Mon, 9 Jan 2023 16:21:59 -0600
Subject: [PATCH 4/4] [OVN][Migration] Enable setting backup subnets for NFS
 clients
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If BACKUP_MIGRATION_IP is set to an IP outside of the default nets [1]
used by the "openstack overcloud backup" playbook setup_nfs role [2],
NFS will fail to mount directories during the backup, because the
clients will not be reachable or permitted.

This change adds a new variable, BACKUP_MIGRATION_CTL_PLANE_CIDRS, to
the ovn_migration script, allowing the user to override the extra-var
used for the "openstack overcloud backup --setup-nfs" command.
[1] https://opendev.org/openstack/tripleo-ansible/src/commit/e281ae7624774d71f22fbb993af967ed1ec08780/tripleo_ansible/roles/backup_and_restore/defaults/main.yml#L47
[2] https://opendev.org/openstack/tripleo-ansible/src/commit/e281ae7624774d71f22fbb993af967ed1ec08780/tripleo_ansible/roles/backup_and_restore/tasks/setup_nfs.yml#L127

Change-Id: I160dfc4e893b93ac7a40e19b3dd6b89750dac57d
(cherry picked from commit b677d65b2d9cf9e3ff58677300712fe3fe40da04)
---
 doc/source/ovn/migration.rst                             | 6 ++++++
 tools/ovn_migration/tripleo_environment/ovn_migration.sh | 4 +++-
 .../playbooks/roles/recovery-backup/tasks/main.yml       | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/doc/source/ovn/migration.rst b/doc/source/ovn/migration.rst
index 7fa733ce4ff..38d9bb65a52 100644
--- a/doc/source/ovn/migration.rst
+++ b/doc/source/ovn/migration.rst
@@ -155,6 +155,12 @@ Perform the following steps in the undercloud
      server that will be used as a NFS server to store the backup.
      Default: 192.168.24.1

+   * BACKUP_MIGRATION_CTL_PLANE_CIDRS - Only used if CREATE_BACKUP is enabled.
+     A comma separated string of control plane subnets in CIDR notation for
+     the controllers being backed up. The specified subnets will be used to
+     enable NFS remote client connections.
+     Default: 192.168.24.0/24
+
 .. warning::

      Please note that VALIDATE_MIGRATION requires enough quota (2
diff --git a/tools/ovn_migration/tripleo_environment/ovn_migration.sh b/tools/ovn_migration/tripleo_environment/ovn_migration.sh
index 271783e74a5..14c6ce7d0c2 100644
--- a/tools/ovn_migration/tripleo_environment/ovn_migration.sh
+++ b/tools/ovn_migration/tripleo_environment/ovn_migration.sh
@@ -42,7 +42,8 @@ LANG=C
 : ${VALIDATE_MIGRATION:=False}
 : ${DHCP_RENEWAL_TIME:=30}
 : ${CREATE_BACKUP:=True}
-: ${BACKUP_MIGRATION_IP:=192.168.24.1} # TODO: Document this new var
+: ${BACKUP_MIGRATION_IP:=192.168.24.1}
+: ${BACKUP_MIGRATION_CTL_PLANE_CIDRS:=192.168.24.0/24}


 check_for_necessary_files() {
@@ -328,6 +329,7 @@ start_migration() {
       -e overcloudrc=$OVERCLOUDRC_FILE \
       -e stackrc=$STACKRC_FILE \
       -e backup_migration_ip=$BACKUP_MIGRATION_IP \
+      -e backup_migration_ctl_plane_cidrs=$BACKUP_MIGRATION_CTL_PLANE_CIDRS \
       -e create_backup=$CREATE_BACKUP \
       -e ansible_inventory=$inventory_file \
       -e validate_migration=$VALIDATE_MIGRATION $*
diff --git a/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml b/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml
index 6ed510316ae..d018183fb5c 100644
--- a/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml
+++ b/tools/ovn_migration/tripleo_environment/playbooks/roles/recovery-backup/tasks/main.yml
@@ -36,6 +36,7 @@
           --setup-nfs \
           --extra-vars '{
           "tripleo_backup_and_restore_server": {{ backup_migration_ip }},
+          "tripleo_backup_and_restore_clients_nets": {{ backup_migration_ctl_plane_cidrs.split(',') }},
           "nfs_server_group_name": {{ revert_preparation_server_name }}
           }'
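For illustration (the values are made up), the Jinja split above turns
the comma-separated shell variable into the list that the
backup_and_restore role expects for its NFS client configuration:

    # BACKUP_MIGRATION_CTL_PLANE_CIDRS as exported by the operator:
    cidrs = '192.168.24.0/24,10.0.0.0/24'
    # Rendered value of tripleo_backup_and_restore_clients_nets:
    print(cidrs.split(','))  # ['192.168.24.0/24', '10.0.0.0/24']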