Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions doc/source/ovn/migration.rst
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,12 @@ Perform the following steps in the undercloud
server that will be used as an NFS server to store the backup.
Default: 192.168.24.1

* BACKUP_MIGRATION_CTL_PLANE_CIDRS - Only used if CREATE_BACKUP is enabled.
A comma-separated string of control plane subnets in CIDR notation for the
controllers being backed up. The specified subnets will be used to enable
NFS remote client connections.
Default: 192.168.24.0/24

.. warning::

Please note that VALIDATE_MIGRATION requires enough quota (2
Expand Down
21 changes: 12 additions & 9 deletions neutron/agent/metadata/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,14 +255,6 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
bind_address="0.0.0.0", network_id=None,
router_id=None, bind_address_v6=None,
bind_interface=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
bind_address, port, conf,
network_id=network_id, router_id=router_id,
bind_address_v6=bind_address_v6, bind_interface=bind_interface)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
if bind_interface is not None and bind_address_v6 is not None:
# HAProxy cannot bind() until IPv6 Duplicate Address Detection
# completes. We must wait until the address leaves its 'tentative'
Expand Down Expand Up @@ -290,7 +282,18 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
except Exception as exc:
# do not re-raise a delete failure, just log
LOG.info('Address deletion failure: %s', str(exc))
return

# Do not use the address or interface when DAD fails
bind_address_v6 = bind_interface = None

uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
bind_address, port, conf,
network_id=network_id, router_id=router_id,
bind_address_v6=bind_address_v6, bind_interface=bind_interface)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[router_id] = pm
Expand Down
2 changes: 1 addition & 1 deletion neutron/db/models/address_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,6 @@ class AddressGroup(standard_attr.HasStandardAttributes,
cascade='all, delete-orphan')
rbac_entries = sa.orm.relationship(rbac_db_models.AddressGroupRBAC,
backref='address_groups',
lazy='subquery',
lazy='joined',
cascade='all, delete, delete-orphan')
api_collections = [ag.ALIAS]
2 changes: 1 addition & 1 deletion neutron/db/models/address_scope.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,5 +37,5 @@ class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasProject):

rbac_entries = sa.orm.relationship(rbac_db_models.AddressScopeRBAC,
backref='address_scopes',
lazy='subquery',
lazy='joined',
cascade='all, delete, delete-orphan')
2 changes: 1 addition & 1 deletion neutron/db/models/securitygroup.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ class SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2,
nullable=False)
rbac_entries = sa.orm.relationship(rbac_db_models.SecurityGroupRBAC,
backref='security_group',
lazy='subquery',
lazy='joined',
cascade='all, delete, delete-orphan')
api_collections = [sg.SECURITYGROUPS]
collection_resource_map = {sg.SECURITYGROUPS: 'security_group'}
Expand Down
6 changes: 3 additions & 3 deletions neutron/db/models_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2,
# subnets don't have their own rbac_entries, they just inherit from
# the network rbac entries
rbac_entries = orm.relationship(
rbac_db_models.NetworkRBAC, lazy='subquery', uselist=True,
rbac_db_models.NetworkRBAC, lazy='joined', uselist=True,
foreign_keys='Subnet.network_id',
primaryjoin='Subnet.network_id==NetworkRBAC.object_id',
viewonly=True)
Expand Down Expand Up @@ -282,7 +282,7 @@ class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2,
lazy='subquery')
rbac_entries = sa.orm.relationship(rbac_db_models.SubnetPoolRBAC,
backref='subnetpools',
lazy='subquery',
lazy='joined',
cascade='all, delete, delete-orphan')
api_collections = [subnetpool_def.COLLECTION_NAME]
collection_resource_map = {subnetpool_def.COLLECTION_NAME:
Expand All @@ -304,7 +304,7 @@ class Network(standard_attr.HasStandardAttributes, model_base.BASEV2,
rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC,
backref=orm.backref('network',
load_on_pending=True),
lazy='subquery',
lazy='joined',
cascade='all, delete, delete-orphan')
availability_zone_hints = sa.Column(sa.String(255))
mtu = sa.Column(sa.Integer, nullable=False,
Expand Down
2 changes: 1 addition & 1 deletion neutron/db/qos/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class QosPolicy(standard_attr.HasStandardAttributes, model_base.BASEV2,
__tablename__ = 'qos_policies'
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC,
backref='qos_policy', lazy='subquery',
backref='qos_policy', lazy='joined',
cascade='all, delete, delete-orphan')
api_collections = ['policies']
collection_resource_map = {'policies': 'policy'}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1028,6 +1028,10 @@ def sync_networks_ports_and_dhcp_opts(self, ctx):
except RuntimeError:
LOG.warning("Create network in OVN NB failed for "
"network %s", network['id'])
except n_exc.IpAddressGenerationFailure:
LOG.warning("No more IP addresses available during "
"implicit port creation while creating "
"network %s", network['id'])

self._sync_metadata_ports(ctx, db_ports)

Expand Down
35 changes: 18 additions & 17 deletions neutron/tests/unit/agent/metadata/test_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,9 +176,12 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):
"%s.conf" % router_id)
mock_open = self.useFixture(
lib_fixtures.OpenFixture(cfg_file)).mock_open
bind_v6_line = 'bind %s:%s interface %s' % (
self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if')
if dad_failed:
mock_wait.side_effect = ip_lib.DADFailed(
address=self.METADATA_DEFAULT_IP, reason='DAD failed')
address=self.METADATA_DEFAULT_IPV6, reason='DAD failed')
bind_v6_line = ''
else:
mock_wait.return_value = True
agent.metadata_driver.spawn_monitored_metadata_proxy(
Expand All @@ -197,8 +200,6 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):

log_tag = ("haproxy-" + metadata_driver.METADATA_SERVICE_NAME +
"-" + router_id)
bind_v6_line = 'bind %s:%s interface %s' % (
self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if')
cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
'user': self.EUNAME,
'group': self.EGNAME,
Expand All @@ -214,26 +215,26 @@ def _test_spawn_metadata_proxy(self, dad_failed=False):
'bind_v6_line': bind_v6_line}

if dad_failed:
agent.process_monitor.register.assert_not_called()
mock_del.assert_called_once_with(self.METADATA_DEFAULT_IPV6,
'fake-if',
namespace=router_ns)
else:
mock_open.assert_has_calls([
mock.call(cfg_file, 'w'),
mock.call().write(cfg_contents)], any_order=True)

ip_mock.assert_has_calls([
mock.call(namespace=router_ns),
mock.call().netns.execute(netns_execute_args,
addl_env=None, run_as_root=True)
])

agent.process_monitor.register.assert_called_once_with(
router_id, metadata_driver.METADATA_SERVICE_NAME,
mock.ANY)
mock_del.assert_not_called()

mock_open.assert_has_calls([
mock.call(cfg_file, 'w'),
mock.call().write(cfg_contents)], any_order=True)

ip_mock.assert_has_calls([
mock.call(namespace=router_ns),
mock.call().netns.execute(netns_execute_args, addl_env=None,
run_as_root=True)
])

agent.process_monitor.register.assert_called_once_with(
router_id, metadata_driver.METADATA_SERVICE_NAME,
mock.ANY)

def test_spawn_metadata_proxy(self):
self._test_spawn_metadata_proxy()

Expand Down
1 change: 1 addition & 0 deletions neutron/tests/unit/objects/test_address_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ class AddressGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
testlib_api.SqlTestCase):

_test_class = address_group.AddressGroupRBAC
_parent_class = address_group.AddressGroup

def setUp(self):
super(AddressGroupRBACDbObjectTestCase, self).setUp()
Expand Down
1 change: 1 addition & 0 deletions neutron/tests/unit/objects/test_address_scope.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ class AddressScopeRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
testlib_api.SqlTestCase):

_test_class = address_scope.AddressScopeRBAC
_parent_class = address_scope.AddressScope

def setUp(self):
super(AddressScopeRBACDbObjectTestCase, self).setUp()
Expand Down
10 changes: 10 additions & 0 deletions neutron/tests/unit/objects/test_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@

from unittest import mock

from neutron_lib.api.definitions import availability_zone as az_def

from neutron.db import rbac_db_models
from neutron.objects import base as obj_base
from neutron.objects import network
Expand All @@ -27,6 +29,7 @@ class NetworkRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
testlib_api.SqlTestCase):

_test_class = network.NetworkRBAC
_parent_class = network.Network

def setUp(self):
self._mock_get_valid_actions = mock.patch.object(
Expand All @@ -50,6 +53,13 @@ def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self):
network_rbac_obj['versioned_object.data'])
self.assertNotIn('id', network_rbac_obj['versioned_object.data'])

def _create_random_parent_object(self):
objclass_fields = self.get_random_db_fields(self._parent_class)
objclass_fields.pop(az_def.AZ_HINTS)
_obj = self._parent_class(self.context, **objclass_fields)
_obj.create()
return _obj


class NetworkRBACIfaceOjectTestCase(test_rbac.TestRBACObjectMixin,
obj_test_base.BaseObjectIfaceTestCase):
Expand Down
37 changes: 36 additions & 1 deletion neutron/tests/unit/objects/test_rbac.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,13 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random

import random
from unittest import mock

from neutron_lib import context

from neutron.db import rbac_db_models
from neutron.objects import address_group
from neutron.objects import address_scope
from neutron.objects import network
Expand All @@ -26,6 +29,9 @@

class TestRBACObjectMixin(object):

_test_class = None
_parent_class = None

def get_random_object_fields(self, obj_cls=None):
fields = (super(TestRBACObjectMixin, self).
get_random_object_fields(obj_cls))
Expand All @@ -34,6 +40,35 @@ def get_random_object_fields(self, obj_cls=None):
fields['action'] = rnd_actions[idx]
return fields

def _create_random_parent_object(self):
objclass_fields = self.get_random_db_fields(self._parent_class)
_obj = self._parent_class(self.context, **objclass_fields)
_obj.create()
return _obj

def test_rbac_shared_on_parent_object(self):
if not self._test_class or not self._parent_class:
self.skipTest('Mixin class, skipped test')
project_id = self.objs[0].project_id
_obj_shared = self._create_random_parent_object()
# Create a second object that won't be shared and thus won't be
# retrieved by the non-admin users.
self._create_random_parent_object()
for idx in range(3):
project = 'project_%s' % idx
rbac = self._test_class(
self.context, project_id=project_id, target_project=project,
action=rbac_db_models.ACCESS_SHARED,
object_id=_obj_shared.id)
rbac.create()

for idx in range(3):
project = 'project_%s' % idx
ctx_no_admin = context.Context(user_id='user', tenant_id=project,
is_admin=False)
objects = self._parent_class.get_objects(ctx_no_admin)
self.assertEqual([_obj_shared.id], [_obj.id for _obj in objects])


class RBACBaseObjectTestCase(neutron_test_base.BaseTestCase):

Expand Down
1 change: 1 addition & 0 deletions neutron/tests/unit/objects/test_securitygroup.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ class SecurityGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
testlib_api.SqlTestCase):

_test_class = securitygroup.SecurityGroupRBAC
_parent_class = securitygroup.SecurityGroup

def setUp(self):
super(SecurityGroupRBACDbObjectTestCase, self).setUp()
Expand Down
1 change: 1 addition & 0 deletions neutron/tests/unit/objects/test_subnetpool.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ class SubnetPoolRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
SubnetPoolTestMixin):

_test_class = subnetpool.SubnetPoolRBAC
_parent_class = subnetpool.SubnetPool

def setUp(self):
super(SubnetPoolRBACDbObjectTestCase, self).setUp()
Expand Down
4 changes: 3 additions & 1 deletion tools/ovn_migration/tripleo_environment/ovn_migration.sh
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ LANG=C
: ${VALIDATE_MIGRATION:=False}
: ${DHCP_RENEWAL_TIME:=30}
: ${CREATE_BACKUP:=True}
: ${BACKUP_MIGRATION_IP:=192.168.24.1} # TODO: Document this new var
: ${BACKUP_MIGRATION_IP:=192.168.24.1}
: ${BACKUP_MIGRATION_CTL_PLANE_CIDRS:=192.168.24.0/24}


check_for_necessary_files() {
Expand Down Expand Up @@ -328,6 +329,7 @@ start_migration() {
-e overcloudrc=$OVERCLOUDRC_FILE \
-e stackrc=$STACKRC_FILE \
-e backup_migration_ip=$BACKUP_MIGRATION_IP \
-e backup_migration_ctl_plane_cidrs=$BACKUP_MIGRATION_CTL_PLANE_CIDRS \
-e create_backup=$CREATE_BACKUP \
-e ansible_inventory=$inventory_file \
-e validate_migration=$VALIDATE_MIGRATION $*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
--setup-nfs \
--extra-vars '{
"tripleo_backup_and_restore_server": {{ backup_migration_ip }},
"tripleo_backup_and_restore_clients_nets": {{ backup_migration_ctl_plane_cidrs.split(',') }},
"nfs_server_group_name": {{ revert_preparation_server_name }}
}'

Expand Down