From 5860fb21e966ab8f1e011654dd477d7af35f7a27 Mon Sep 17 00:00:00 2001
From: venkata anil
Date: Wed, 12 Oct 2016 10:57:46 +0000
Subject: [PATCH] Check for ha port to become ACTIVE

After a reboot of the node (a restart of the l3 and l2 agents),
routers can be processed by the l3 agent before the openvswitch
agent sets up the appropriate HA ports. This change adds a
notification to the l3 agent when an HA port becomes ACTIVE, so
that keepalived can then be enabled.

Note: a release note is added to document the l3 agent's
dependency on the neutron server.

Closes-bug: #1597461
Co-Authored-By: venkata anil
(cherry picked from commit 25f5912cf8f69f18d111bd60a6cc6ee488755ff3)

Conflicts:
    neutron/db/l3_hascheduler_db.py
    neutron/services/l3_router/l3_router_plugin.py
    neutron/tests/unit/plugins/ml2/test_plugin.py
    neutron/tests/functional/agent/l3/test_ha_router.py
    releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml

Change-Id: Iedad1ccae45005efaaa74d5571df04197757d07a
(cherry picked from commit 4ad841c4cf1b23695a792ea6facf1dbf66cb48e9)

split out l3-ha specific test from TestMl2PortsV2

Split test_update_port_status_notify_port_event_after_update out of
ml2.test_plugin.TestMl2PortsV2 into a new TestMl2PortsV2WithL3 class.

Change set 25f5912cf8f69f18d111bd60a6cc6ee488755ff3 (change id
Iedad1ccae45005efaaa74d5571df04197757d07a) introduced a test,
test_update_port_status_notify_port_event_after_update, that is valid
only when the l3 plugin supports l3-ha. That assumption does not
always hold; it depends on the actual ml2 driver. Since the test
cases in ml2.test_plugin are used as a common base for multiple
drivers, test_update_port_status_notify_port_event_after_update may
or may not pass. So split tests with such specific assumptions out
into a new, dedicated test case, so that each driver can safely reuse
the tests in tests/unit/plugins/ml2 according to its characteristics.

Conflicts:
    neutron/tests/unit/plugins/ml2/test_plugin.py

Closes-Bug: #1618601
Change-Id: Ie81dde976649111d029a7d107c99960aded64915
(cherry picked from commit 03c412ff011a8d4e86afbada24db675028861728)

Change-Id: Iedad1ccae45005efaaa74d5571df04197757d07a
(cherry picked from commit 4ad841c4cf1b23695a792ea6facf1dbf66cb48e9)
---
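
Reviewer note, not part of the commit: a minimal, self-contained
Python sketch of the publish/subscribe flow this patch relies on.
The stand-in registry below only mimics neutron.callbacks.registry;
the resource/event names and the HA device_owner value match the ones
used in the diff, while everything else (the toy subscribe/notify
helpers, the 'compute-1' and 'router-uuid' values) is illustrative.

    # Toy stand-in for neutron.callbacks.registry -- illustration only.
    AFTER_UPDATE = 'after_update'
    PORT = 'port'
    _callbacks = {}

    def subscribe(callback, resource, event):
        # registry.subscribe() equivalent: remember who listens for
        # which (resource, event) pair.
        _callbacks.setdefault((resource, event), []).append(callback)

    def notify(resource, event, trigger, **kwargs):
        # registry.notify() equivalent: fan out to every subscriber.
        for cb in _callbacks.get((resource, event), []):
            cb(resource, event, trigger, **kwargs)

    def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
        # Mirrors the new callback: react only when an HA interface
        # port has gone ACTIVE on a bound host.
        port = kwargs['port']
        if (port['device_owner'] == 'network:router_ha_interface' and
                port['status'] == 'ACTIVE' and port.get('host')):
            print('notify l3 agent on %(host)s: router %(device_id)s'
                  % port)

    subscribe(_notify_l3_agent_ha_port_update, PORT, AFTER_UPDATE)

    # Equivalent of ML2 marking the HA port ACTIVE (e.g. from
    # update_device_up) and firing the AFTER_UPDATE event:
    notify(PORT, AFTER_UPDATE, None,
           port={'device_owner': 'network:router_ha_interface',
                 'status': 'ACTIVE', 'host': 'compute-1',
                 'device_id': 'router-uuid'})
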
 neutron/agent/l3/ha_router.py                 |  4 ++-
 neutron/db/l3_hascheduler_db.py               | 29 ++++++++++++++++++
 .../services/l3_router/l3_router_plugin.py    |  3 ++
 .../functional/agent/l3/test_ha_router.py     | 12 ++++++++
 neutron/tests/unit/plugins/ml2/test_plugin.py | 30 +++++++++++++++++++
 ...nt-server-dependency-1fcb775328ac4502.yaml |  6 ++++
 6 files changed, 83 insertions(+), 1 deletion(-)
 create mode 100644 releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml

diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py
index b7d7031fd7c..68324bf3ef1 100644
--- a/neutron/agent/l3/ha_router.py
+++ b/neutron/agent/l3/ha_router.py
@@ -385,7 +385,9 @@ def delete(self, agent):
     def process(self, agent):
         super(HaRouter, self).process(agent)
 
-        if self.ha_port:
+        self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
+        if (self.ha_port and
+                self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
             self.enable_keepalived()
 
     @common_utils.synchronized('enable_radvd')
diff --git a/neutron/db/l3_hascheduler_db.py b/neutron/db/l3_hascheduler_db.py
index 7acb1c80f72..96dd9a178c4 100644
--- a/neutron/db/l3_hascheduler_db.py
+++ b/neutron/db/l3_hascheduler_db.py
@@ -12,13 +12,20 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from neutron_lib import constants
 from sqlalchemy import func
 from sqlalchemy import sql
 
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
 from neutron.db import agents_db
 from neutron.db import l3_agentschedulers_db as l3_sch_db
 from neutron.db import l3_attrs_db
 from neutron.db import l3_db
+from neutron.extensions import portbindings
+from neutron import manager
+from neutron.plugins.common import constants as service_constants
 
 
 class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
@@ -81,3 +88,25 @@ def list_l3_agents_hosting_router(self, context, router_id):
         bindings = [(binding.l3_agent, None) for binding in bindings]
 
         return self._get_agents_dict_for_router(bindings)
+
+
+def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
+    port_db = kwargs.get('port')
+    context = kwargs.get('context')
+    core_plugin = manager.NeutronManager.get_plugin()
+    new_port = core_plugin._make_port_dict(port_db)
+    host = new_port.get(portbindings.HOST_ID)
+
+    if new_port and host:
+        new_device_owner = new_port.get('device_owner', '')
+        if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and
+                new_port['status'] == constants.PORT_STATUS_ACTIVE):
+            l3plugin = manager.NeutronManager.get_service_plugins().get(
+                service_constants.L3_ROUTER_NAT)
+            l3plugin.l3_rpc_notifier.routers_updated_on_host(
+                context, [new_port['device_id']], host)
+
+
+def subscribe():
+    registry.subscribe(
+        _notify_l3_agent_ha_port_update, resources.PORT, events.AFTER_UPDATE)
diff --git a/neutron/services/l3_router/l3_router_plugin.py b/neutron/services/l3_router/l3_router_plugin.py
index 70fcdce04f8..1cb85fe6930 100644
--- a/neutron/services/l3_router/l3_router_plugin.py
+++ b/neutron/services/l3_router/l3_router_plugin.py
@@ -30,6 +30,7 @@
 from neutron.db import l3_dvrscheduler_db
 from neutron.db import l3_gwmode_db
 from neutron.db import l3_hamode_db
+from neutron.db import l3_hascheduler_db
 from neutron.plugins.common import constants
 from neutron.quota import resource_registry
 from neutron.services import service_base
@@ -65,6 +66,8 @@ def __init__(self):
         super(L3RouterPlugin, self).__init__()
         if 'dvr' in self.supported_extension_aliases:
             l3_dvrscheduler_db.subscribe()
+        if 'l3-ha' in self.supported_extension_aliases:
+            l3_hascheduler_db.subscribe()
         l3_db.subscribe()
         self.start_rpc_listeners()
 
diff --git a/neutron/tests/functional/agent/l3/test_ha_router.py b/neutron/tests/functional/agent/l3/test_ha_router.py
index fd134af7324..454be2ebe14 100644
--- a/neutron/tests/functional/agent/l3/test_ha_router.py
+++ b/neutron/tests/functional/agent/l3/test_ha_router.py
@@ -256,6 +256,18 @@ def test_removing_floatingip_immediately(self):
         # call the configure_fip_addresses directly here
         router.configure_fip_addresses(interface_name)
 
+    def test_ha_port_status_update(self):
+        router_info = self.generate_router_info(enable_ha=True)
+        router_info[l3_constants.HA_INTERFACE_KEY]['status'] = (
+            l3_constants.PORT_STATUS_DOWN)
+        router1 = self.manage_router(self.agent, router_info)
+        utils.wait_until_true(lambda: router1.ha_state == 'backup')
+
+        router1.router[l3_constants.HA_INTERFACE_KEY]['status'] = (
+            l3_constants.PORT_STATUS_ACTIVE)
+        self.agent._process_updated_router(router1.router)
+        utils.wait_until_true(lambda: router1.ha_state == 'master')
+
 
 class L3HATestFailover(framework.L3AgentTestFramework):
 
diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py
index d1f8b342ac1..d305bfacda2 100644
--- a/neutron/tests/unit/plugins/ml2/test_plugin.py
+++ b/neutron/tests/unit/plugins/ml2/test_plugin.py
@@ -32,6 +32,7 @@
 from neutron.callbacks import resources
 from neutron.common import constants
 from neutron.common import exceptions as exc
+from neutron.common import topics
 from neutron.common import utils
 from neutron import context
 from neutron.db import agents_db
@@ -54,6 +55,8 @@
 from neutron.plugins.ml2.drivers import type_vlan
 from neutron.plugins.ml2 import models
 from neutron.plugins.ml2 import plugin as ml2_plugin
+from neutron.plugins.ml2 import rpc
+from neutron.services.l3_router import l3_router_plugin
 from neutron.services.qos import qos_consts
 from neutron.tests import base
 from neutron.tests.unit import _test_extension_portbindings as test_bindings
@@ -76,6 +79,7 @@
 
 DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
 HOST = 'fake_host'
+TEST_ROUTER_ID = 'router_id'
 
 
 # TODO(marun) - Move to somewhere common for reuse
@@ -873,6 +877,32 @@ def commit(self, con):
         self.assertTrue(listener.except_raised)
 
 
+class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
+    """For testing methods that require the L3 service plugin."""
+
+    def test_update_port_status_notify_port_event_after_update(self):
+        ctx = context.get_admin_context()
+        plugin = manager.NeutronManager.get_plugin()
+        notifier = rpc.AgentNotifierApi(topics.AGENT)
+        self.plugin_rpc = rpc.RpcCallbacks(notifier, plugin.type_manager)
+        # enable subscription for events
+        l3_router_plugin.L3RouterPlugin()
+        l3plugin = manager.NeutronManager.get_service_plugins().get(
+            p_const.L3_ROUTER_NAT)
+        host_arg = {portbindings.HOST_ID: HOST}
+        with mock.patch.object(l3plugin.l3_rpc_notifier,
+                               'routers_updated_on_host') as mock_updated:
+            with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
+                           device_id=TEST_ROUTER_ID,
+                           arg_list=(portbindings.HOST_ID,),
+                           **host_arg) as port:
+                self.plugin_rpc.update_device_up(
+                    ctx, agent_id="theAgentId", device=port['port']['id'],
+                    host=HOST)
+            mock_updated.assert_called_once_with(
+                mock.ANY, [TEST_ROUTER_ID], HOST)
+
+
 class TestMl2PluginOnly(Ml2PluginV2TestCase):
     """For testing methods that don't call drivers"""
 
diff --git a/releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml b/releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml
new file mode 100644
index 00000000000..a3df9dd791a
--- /dev/null
+++ b/releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - The server now notifies L3 HA agents when an HA router
+    interface port's status becomes active; only then do the
+    L3 HA agents spawn the keepalived process. The server must
+    therefore be restarted before the L3 agents during an upgrade.