From dec4570c104f27c09d73a63b00e80e59eb9ee2c0 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Sat, 20 Aug 2022 15:49:42 -0700 Subject: [PATCH] Handle dual ToR neighbor miss scenario (#2151) * Handle dual ToR neighbor miss scenario (#2137) - When orchagent receives a neighbor update with a zero MAC: - If the neighbor IP is configured for a specific mux cable port in the MUX_CABLE table in CONFIG_DB, handle the neighbor normally (if active for the port, no action is needed; if standby, a tunnel route is created for the neighbor IP) - If the neighbor IP is not configured for a specific port, create a tunnel route for the IP to the peer switch. - When these neighbor IPs are eventually resolved, remove the tunnel route and handle the neighbor normally. - When creating/initializing a mux cable object, set the internal state to standby to match the constructor behavior. - Various formatting fixes inside test_mux.py - Remove references to deprecated `@pytest.yield_fixture` - Add dual ToR neighbor miss test cases: - Test cases and expected results are described in `mux_neigh_miss_tests.py`. These descriptions are used by the generic test runner `test_neighbor_miss` function to execute the test actions and verify expected results - Various setup fixtures and test info fixtures were added - Existing test cases were changed to use these setup fixtures for consistency Signed-off-by: Lawrence Lee Co-authored-by: Sumukha Tumkur Vani --- neighsyncd/neighsync.cpp | 34 ++- neighsyncd/neighsync.h | 2 +- orchagent/muxorch.cpp | 52 ++++ orchagent/muxorch.h | 8 + orchagent/neighorch.cpp | 20 +- orchagent/neighorch.h | 2 + tests/conftest.py | 22 +- tests/mux_neigh_miss_tests.py | 243 +++++++++++++++ tests/test_acl.py | 10 +- tests/test_acl_egress_table.py | 2 +- tests/test_buffer_dynamic.py | 2 +- tests/test_mux.py | 525 +++++++++++++++++++++++++-------- tests/test_nhg.py | 2 +- tests/test_port_config.py | 2 +- 14 files changed, 773 insertions(+), 153 deletions(-) create mode 100644 tests/mux_neigh_miss_tests.py diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp index 4589e5c27367..8864746cb563 100644 --- a/neighsyncd/neighsync.cpp +++ b/neighsyncd/neighsync.cpp @@ -23,7 +23,8 @@ NeighSync::NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb, DBConne m_stateNeighRestoreTable(stateDb, STATE_NEIGH_RESTORE_TABLE_NAME), m_cfgInterfaceTable(cfgDb, CFG_INTF_TABLE_NAME), m_cfgLagInterfaceTable(cfgDb, CFG_LAG_INTF_TABLE_NAME), - m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME) + m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME), + m_cfgPeerSwitchTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME) { m_AppRestartAssist = new AppRestartAssist(pipelineAppDB, "neighsyncd", "swss", DEFAULT_NEIGHSYNC_WARMSTART_TIMER); if (m_AppRestartAssist) @@ -108,14 +109,39 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) return; } + std::vector<std::string> peerSwitchKeys; bool delete_key = false; - if ((nlmsg_type == RTM_DELNEIGH) || (state == NUD_INCOMPLETE) || - (state == NUD_FAILED)) + bool use_zero_mac = false; + m_cfgPeerSwitchTable.getKeys(peerSwitchKeys); + bool is_dualtor = peerSwitchKeys.size() > 0; + if (is_dualtor && (state == NUD_INCOMPLETE || state == NUD_FAILED)) + { + SWSS_LOG_INFO("Unable to resolve %s, setting zero MAC", key.c_str()); + use_zero_mac = true; + + // Unresolved neighbor deletion on dual ToR devices must be handled + // separately, otherwise delete_key is never set to true + // and neighorch is never able to remove the neighbor + if (nlmsg_type == RTM_DELNEIGH) + { + delete_key 
= true; + } + } + else if ((nlmsg_type == RTM_DELNEIGH) || + (state == NUD_INCOMPLETE) || (state == NUD_FAILED)) { delete_key = true; } - nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); + if (use_zero_mac) + { + std::string zero_mac = "00:00:00:00:00:00"; + strncpy(macStr, zero_mac.c_str(), zero_mac.length()); + } + else + { + nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); + } /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */ if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff"))) diff --git a/neighsyncd/neighsync.h b/neighsyncd/neighsync.h index b91392c4c021..8f25ee16c802 100644 --- a/neighsyncd/neighsync.h +++ b/neighsyncd/neighsync.h @@ -36,7 +36,7 @@ class NeighSync : public NetMsg } private: - Table m_stateNeighRestoreTable; + Table m_stateNeighRestoreTable, m_cfgPeerSwitchTable; ProducerStateTable m_neighTable; AppRestartAssist *m_AppRestartAssist; Table m_cfgVlanInterfaceTable, m_cfgLagInterfaceTable, m_cfgInterfaceTable; diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 0561a2336763..296d5a3cf35d 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -1069,6 +1069,37 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) return; } + auto standalone_tunnel_neigh_it = standalone_tunnel_neighbors_.find(update.entry.ip_address); + // Handling zero MAC neighbor updates + if (!update.mac) + { + /* For neighbors that were previously resolvable but are now unresolvable, + * we expect such neighbor entries to be deleted prior to a zero MAC update + * arriving for that same neighbor. + */ + + if (update.add) + { + if (standalone_tunnel_neigh_it == standalone_tunnel_neighbors_.end()) + { + createStandaloneTunnelRoute(update.entry.ip_address); + } + /* If the MAC address in the neighbor entry is zero but the neighbor IP + * is already present in standalone_tunnel_neighbors_, assume we have already + * added a tunnel route for it and exit early + */ + return; + } + } + /* If the update operation for a neighbor contains a non-zero MAC, we must + * make sure to remove any existing tunnel routes to prevent conflicts. + * This block also covers the case of neighbor deletion. 
+ */ + if (standalone_tunnel_neigh_it != standalone_tunnel_neighbors_.end()) + { + removeStandaloneTunnelRoute(update.entry.ip_address); + } + for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++) { MuxCable* ptr = it->second.get(); @@ -1376,6 +1407,27 @@ bool MuxOrch::delOperation(const Request& request) return true; } +void MuxOrch::createStandaloneTunnelRoute(IpAddress neighborIp) +{ + SWSS_LOG_INFO("Creating standalone tunnel route for neighbor %s", neighborIp.to_string().c_str()); + sai_object_id_t tunnel_nexthop = getNextHopTunnelId(MUX_TUNNEL, mux_peer_switch_); + if (tunnel_nexthop == SAI_NULL_OBJECT_ID) { + SWSS_LOG_NOTICE("%s nexthop not created yet, ignoring tunnel route creation for %s", MUX_TUNNEL, neighborIp.to_string().c_str()); + return; + } + IpPrefix pfx = neighborIp.to_string(); + create_route(pfx, tunnel_nexthop); + standalone_tunnel_neighbors_.insert(neighborIp); +} + +void MuxOrch::removeStandaloneTunnelRoute(IpAddress neighborIp) +{ + SWSS_LOG_INFO("Removing standalone tunnel route for neighbor %s", neighborIp.to_string().c_str()); + IpPrefix pfx = neighborIp.to_string(); + remove_route(pfx); + standalone_tunnel_neighbors_.erase(neighborIp); +} + MuxCableOrch::MuxCableOrch(DBConnector *db, DBConnector *sdb, const std::string& tableName): Orch2(db, tableName, request_), app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME), diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 5070818b0b0f..ff66e67ff3b7 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -205,6 +205,13 @@ class MuxOrch : public Orch2, public Observer, public Subject bool getMuxPort(const MacAddress&, const string&, string&); + /*** + * Methods for managing tunnel routes for neighbor IPs not associated + * with a specific mux cable + ***/ + void createStandaloneTunnelRoute(IpAddress neighborIp); + void removeStandaloneTunnelRoute(IpAddress neighborIp); + IpAddress mux_peer_switch_ = 0x0; sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID; @@ -219,6 +226,7 @@ class MuxOrch : public Orch2, public Observer, public Subject FdbOrch *fdb_orch_; MuxCfgRequest request_; + std::set<IpAddress> standalone_tunnel_neighbors_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index cd2dc4cd2cf3..eb4afc21932a 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -715,7 +715,16 @@ void NeighOrch::doTask(Consumer &consumer) if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end() || m_syncdNeighbors[neighbor_entry].mac != mac_address) { - if (addNeighbor(neighbor_entry, mac_address)) + // only for unresolvable neighbors that are new + if (!mac_address) + { + if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()) + { + addZeroMacTunnelRoute(neighbor_entry, mac_address); + } + it = consumer.m_toSync.erase(it); + } + else if (addNeighbor(neighbor_entry, mac_address)) { it = consumer.m_toSync.erase(it); } @@ -1716,3 +1725,12 @@ void NeighOrch::updateSrv6Nexthop(const NextHopKey &nh, const sai_object_id_t &n m_syncdNextHops.erase(nh); } } +void NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddress& mac) +{ + SWSS_LOG_INFO("Creating tunnel route for neighbor %s", entry.ip_address.to_string().c_str()); + MuxOrch* mux_orch = gDirectory.get<MuxOrch*>(); + NeighborUpdate update = {entry, mac, true}; + mux_orch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast<void *>(&update)); + m_syncdNeighbors[entry] = { mac, false }; +} + 
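The end-to-end effect of the neighsyncd and neighorch changes above can be observed in APPL_DB. A minimal sketch, assuming the standard SONiC redis socket path and the Vlan1000/192.168.0.200 addressing used by the tests later in this patch (the entry shape follows the zero-MAC publishing described in the commit message; verify field names on your build):

    from swsscommon import swsscommon

    # For an unresolved neighbor on a dual ToR device, neighsyncd publishes a
    # NEIGH_TABLE entry whose MAC is all zeros; that zero MAC is what steers
    # neighorch into the addZeroMacTunnelRoute() path above.
    appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, "/var/run/redis/redis.sock", 0)
    neigh_table = swsscommon.Table(appl_db, "NEIGH_TABLE")
    status, fvs = neigh_table.get("Vlan1000:192.168.0.200")
    if status:
        print(dict(fvs))  # expected shape: {'neigh': '00:00:00:00:00:00', 'family': 'IPv4'}
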
diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index 6587606168fe..727797757f4e 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -115,6 +115,8 @@ class NeighOrch : public Orch, public Subject, public Observer bool resolveNeighborEntry(const NeighborEntry &, const MacAddress &); void clearResolvedNeighborEntry(const NeighborEntry &); + + void addZeroMacTunnelRoute(const NeighborEntry &, const MacAddress &); }; #endif /* SWSS_NEIGHORCH_H */ diff --git a/tests/conftest.py b/tests/conftest.py index 437190a68926..9a7abb1f0699 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1794,7 +1794,7 @@ def update_dvs(log_path, new_dvs_env=[]): dvs.runcmd("mv /etc/sonic/config_db.json.orig /etc/sonic/config_db.json") dvs.ctn_restart() -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def dvs(request, manage_dvs) -> DockerVirtualSwitch: dvs_env = getattr(request.module, "DVS_ENV", []) name = request.config.getoption("--dvsname") @@ -1802,7 +1802,7 @@ def dvs(request, manage_dvs) -> DockerVirtualSwitch: return manage_dvs(log_path, dvs_env) -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def vct(request): vctns = request.config.getoption("--vctns") topo = request.config.getoption("--topo") @@ -1821,7 +1821,8 @@ def vct(request): vct.get_logs(request.module.__name__) vct.destroy() -@pytest.yield_fixture + +@pytest.fixture def testlog(request, dvs): dvs.runcmd(f"logger -t pytest === start test {request.node.nodeid} ===") yield testlog @@ -1850,14 +1851,14 @@ def dvs_route(request, dvs) -> DVSRoute: # FIXME: The rest of these also need to be reverted back to normal fixtures to # appease the linter. -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_lag_manager(request, dvs): request.cls.dvs_lag = dvs_lag.DVSLag(dvs.get_asic_db(), dvs.get_config_db(), dvs) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_vlan_manager(request, dvs): request.cls.dvs_vlan = dvs_vlan.DVSVlan(dvs.get_asic_db(), dvs.get_config_db(), @@ -1865,12 +1866,14 @@ def dvs_vlan_manager(request, dvs): dvs.get_counters_db(), dvs.get_app_db()) -@pytest.yield_fixture(scope="class") + +@pytest.fixture(scope="class") def dvs_port_manager(request, dvs): request.cls.dvs_port = dvs_port.DVSPort(dvs.get_asic_db(), dvs.get_config_db()) -@pytest.yield_fixture(scope="class") + +@pytest.fixture(scope="class") def dvs_mirror_manager(request, dvs): request.cls.dvs_mirror = dvs_mirror.DVSMirror(dvs.get_asic_db(), dvs.get_config_db(), dvs.get_state_db(), dvs.get_counters_db(), dvs.get_app_db()) @@ -1879,7 +1882,7 @@ -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_policer_manager(request, dvs): request.cls.dvs_policer = dvs_policer.DVSPolicer(dvs.get_asic_db(), dvs.get_config_db()) @@ -1897,7 +1900,8 @@ def remove_dpb_config_file(dvs): cmd = "mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json" dvs.runcmd(cmd) -@pytest.yield_fixture(scope="module") + +@pytest.fixture(scope="module") def dpb_setup_fixture(dvs): create_dpb_config_file(dvs) if dvs.vct is None:
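The conftest.py changes above are a mechanical rename: `@pytest.yield_fixture` has been deprecated since pytest 3.0 (and was later removed) because plain `@pytest.fixture` supports yield-style setup/teardown directly. A minimal sketch of the equivalent pattern (`example_resource` is a hypothetical fixture, not part of this patch):

    import pytest

    @pytest.fixture(scope="module")
    def example_resource():
        resource = {"ready": True}  # setup, runs once per module
        yield resource              # tests run while the fixture is live
        resource.clear()            # teardown, runs after the last test
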
diff --git a/tests/mux_neigh_miss_tests.py b/tests/mux_neigh_miss_tests.py new file mode 100644 index 000000000000..d8c32a29e093 --- /dev/null +++ b/tests/mux_neigh_miss_tests.py @@ -0,0 +1,243 @@ +""" +Test scenarios and related constants for dualtor neighbor miss. + +Each item in NEIGH_MISS_TESTS is a test case, comprising a list of steps. +Each step is a dictionary containing the action to be performed during that +step, as well as the expected result. +The expected result itself is another dictionary, containing the following +attributes: + - (bool) EXPECT_ROUTE: if we expect a route entry in ASIC_DB + - (bool) EXPECT_NEIGH: if we expect a neighbor entry in ASIC_DB + - (bool) REAL_MAC: if a real MAC address is expected in the + APPL_DB neighbor table entry, as opposed + to a zero/empty MAC + +All expected result attributes will be verified against the DVS +after each test step is executed. + +Note: EXPECT_ROUTE and EXPECT_NEIGH cannot both be True + +Note: for the purposes of this test, there is a distinction made + between 'server' IPs and 'neighbor' IPs. Server IPs are + IP addresses explicitly configured on a specific mux cable + interface in the MUX_CABLE table in config DB. Neighbor IPs + are any other IPs within the VLAN subnet. + + +""" + +__all__ = [ 'TEST_ACTION', 'EXPECTED_RESULT', 'ACTIVE', 'STANDBY', 'PING_SERV', 'PING_NEIGH', 'RESOLVE_ENTRY', 'DELETE_ENTRY', 'EXPECT_ROUTE', 'EXPECT_NEIGH', 'REAL_MAC', 'INTF', 'IP', 'MAC', 'NEIGH_MISS_TESTS' ] TEST_ACTION = 'action' EXPECTED_RESULT = 'result' # Possible test actions ACTIVE = 'active' # Switch the test interface to active STANDBY = 'standby' # Switch the test interface to standby PING_SERV = 'ping_serv' # Ping the server mux cable IP, used to trigger a netlink fail message PING_NEIGH = 'ping_neigh' # Ping the neighbor IP (not configured on a specific mux cable port) RESOLVE_ENTRY = 'resolve_entry' # Resolve the test IP neighbor entry in the kernel DELETE_ENTRY = 'delete_entry' # Delete the test IP neighbor entry from the kernel # Test expectations EXPECT_ROUTE = 'expect_route' EXPECT_NEIGH = 'expect_neigh' REAL_MAC = 'real_mac' INTF = 'intf' IP = 'ip' MAC = 'mac' # Note: For most test cases below, after the neighbor entry is deleted, we must # still set `REAL_MAC` to `True` in the expected result since a prior step in the # test should have resolved the neighbor entry and confirmed that the APPL_DB # neighbor entry contained a real MAC address. Thus, when we verify that APPL_DB # no longer contains a neighbor table entry, we need to check for the real MAC. # The exception to this is test cases where the neighbor entry is never resolved # in the kernel. In that case, APPL_DB will never contain the real MAC address. 
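# For reference, the generic runner (test_neighbor_miss in tests/test_mux.py,
# added later in this patch) consumes one scenario roughly as follows:
#
#     for step in scenario:
#         execute_action(step[TEST_ACTION], dvs, test_info)
#         result = step[EXPECTED_RESULT]
#         check_neighbor_state(dvs, dvs_route, test_info[IP],
#                              expect_route=result[EXPECT_ROUTE],
#                              expect_neigh=result[EXPECT_NEIGH])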
+ +STANDBY_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +ACTIVE_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +NEIGH_IP_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + 
TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] + +] + +NEIGH_MISS_TESTS = ACTIVE_MUX_CABLE_TESTS + STANDBY_MUX_CABLE_TESTS + NEIGH_IP_TESTS diff --git a/tests/test_acl.py b/tests/test_acl.py index 5c542193f79a..ac7e7fda87a2 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -25,7 +25,7 @@ PFCWD_TABLE_NAME = "PFCWD_TEST" PFCWD_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] class TestAcl: - @pytest.yield_fixture + @pytest.fixture def l3_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS) @@ -34,7 +34,7 @@ def l3_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def l3v6_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(L3V6_TABLE_NAME, @@ -45,7 +45,7 @@ def l3v6_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(L3V6_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def mclag_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(MCLAG_TABLE_NAME, MCLAG_TABLE_TYPE, MCLAG_BIND_PORTS) @@ -54,7 +54,7 @@ def mclag_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(MCLAG_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def mirror_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(MIRROR_TABLE_NAME, MIRROR_TABLE_TYPE, MIRROR_BIND_PORTS) @@ -72,7 +72,7 @@ def pfcwd_acl_table(self, dvs_acl, request): dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def setup_teardown_neighbor(self, dvs): try: # NOTE: set_interface_status has a dependency on cdb within dvs, diff --git a/tests/test_acl_egress_table.py b/tests/test_acl_egress_table.py index 01800d6b206e..0697dae6ee62 100644 --- a/tests/test_acl_egress_table.py +++ b/tests/test_acl_egress_table.py @@ -20,7 +20,7 @@ class TestEgressAclTable: - @pytest.yield_fixture + @pytest.fixture def egress_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index f0a57899e088..0b4177b64cbc 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -5,7 +5,7 @@ from dvslib.dvs_common import PollingConfig -@pytest.yield_fixture +@pytest.fixture def dynamic_buffer(dvs): buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) yield diff --git a/tests/test_mux.py b/tests/test_mux.py index 3913f77d2b16..71193735c912 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -2,16 +2,19 @@ import pytest 
import json +from ipaddress import ip_network, ip_address, IPv4Address from swsscommon import swsscommon +from mux_neigh_miss_tests import * def create_fvs(**kwargs): return swsscommon.FieldValuePairs(list(kwargs.items())) tunnel_nh_id = 0 -class TestMuxTunnelBase(object): +class TestMuxTunnelBase(): APP_MUX_CABLE = "MUX_CABLE_TABLE" + APP_NEIGH_TABLE = "NEIGH_TABLE" APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" @@ -20,20 +23,58 @@ class TestMuxTunnelBase(object): ASIC_NEIGH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + ASIC_FDB_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY" + ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" CONFIG_MUX_CABLE = "MUX_CABLE" + CONFIG_PEER_SWITCH = "PEER_SWITCH" + STATE_FDB_TABLE = "FDB_TABLE" + MUX_TUNNEL_0 = "MuxTunnel0" + PEER_SWITCH_HOST = "peer_switch_hostname" CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + SELF_IPV4 = "10.1.0.32" + PEER_IPV4 = "10.1.0.33" SERV1_IPV4 = "192.168.0.100" SERV1_IPV6 = "fc02:1000::100" - SERV1_SOC_IPV4 = "192.168.0.102" + SERV1_SOC_IPV4 = "192.168.0.103" SERV2_IPV4 = "192.168.0.101" SERV2_IPV6 = "fc02:1000::101" + SERV3_IPV4 = "192.168.0.102" + SERV3_IPV6 = "fc02:1000::102" + NEIGH1_IPV4 = "192.168.0.200" + NEIGH1_IPV6 = "fc02:1000::200" + NEIGH2_IPV4 = "192.168.0.201" + NEIGH2_IPV6 = "fc02:1000::201" + NEIGH3_IPV4 = "192.168.0.202" + NEIGH3_IPV6 = "fc02:1000::202" IPV4_MASK = "/32" IPV6_MASK = "/128" TUNNEL_NH_ID = 0 ACL_PRIORITY = "999" + VLAN_1000 = "Vlan1000" + + PING_CMD = "timeout 0.5 ping -c1 -W1 -i0 -n -q {ip}" + + SAI_ROUTER_INTERFACE_ATTR_TYPE = "SAI_ROUTER_INTERFACE_ATTR_TYPE" + SAI_ROUTER_INTERFACE_TYPE_VLAN = "SAI_ROUTER_INTERFACE_TYPE_VLAN" + + DEFAULT_TUNNEL_PARAMS = { + "tunnel_type": "IPINIP", + "dst_ip": SELF_IPV4, + "dscp_mode": "pipe", + "ecn_mode": "standard", + "ttl_mode": "pipe", + "encap_tc_to_queue_map": TUNNEL_QOS_MAP_NAME, + "encap_tc_to_dscp_map": TUNNEL_QOS_MAP_NAME, + "decap_dscp_to_tc_map": TUNNEL_QOS_MAP_NAME, + "decap_tc_to_pg_map": TUNNEL_QOS_MAP_NAME + } + + DEFAULT_PEER_SWITCH_PARAMS = { + "address_ipv4": PEER_IPV4 + } ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -49,43 +90,48 @@ class TestMuxTunnelBase(object): "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } - + TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} - - def create_vlan_interface(self, confdb, asicdb, dvs): + + def create_vlan_interface(self, dvs): + confdb = dvs.get_config_db() fvs = {"vlanid": "1000"} - confdb.create_entry("VLAN", "Vlan1000", fvs) + confdb.create_entry("VLAN", self.VLAN_1000, fvs) fvs = {"tagging_mode": "untagged"} confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet0", fvs) confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet4", fvs) + confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet8", fvs) fvs = {"NULL": "NULL"} - confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs) + confdb.create_entry("VLAN_INTERFACE", self.VLAN_1000, fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs) 
confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs) dvs.port_admin_set("Ethernet0", "up") dvs.port_admin_set("Ethernet4", "up") - + dvs.port_admin_set("Ethernet8", "up") def create_mux_cable(self, confdb): - fvs = { "server_ipv4":self.SERV1_IPV4 + self.IPV4_MASK, "server_ipv6":self.SERV1_IPV6 + self.IPV6_MASK, "soc_ipv4": self.SERV1_SOC_IPV4 + self.IPV4_MASK, - "cable_type": "active-active" + "cable_type": "active-active" # "cable_type" is not used by orchagent, this is a dummy value } confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet0", fvs) - fvs = { "server_ipv4":self.SERV2_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV2_IPV6+self.IPV6_MASK } + fvs = {"server_ipv4": self.SERV2_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV2_IPV6+self.IPV6_MASK} confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet4", fvs) + fvs = {"server_ipv4": self.SERV3_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV3_IPV6+self.IPV6_MASK} + confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet8", fvs) def set_mux_state(self, appdb, ifname, state_change): @@ -97,22 +143,46 @@ def set_mux_state(self, appdb, ifname, state_change): time.sleep(1) + def get_switch_oid(self, asicdb): + # Assumes only one switch is ever present + keys = asicdb.wait_for_n_keys(self.ASIC_SWITCH_TABLE, 1) + return keys[0] + + def get_vlan_rif_oid(self, asicdb): + # create_vlan_interface should be called before this method + # Assumes only one VLAN RIF is present + rifs = asicdb.get_keys(self.ASIC_RIF_TABLE) + + vlan_oid = '' + for rif_key in rifs: + entry = asicdb.get_entry(self.ASIC_RIF_TABLE, rif_key) + if entry[self.SAI_ROUTER_INTERFACE_ATTR_TYPE] == self.SAI_ROUTER_INTERFACE_TYPE_VLAN: + vlan_oid = rif_key + break + + return vlan_oid - def check_neigh_in_asic_db(self, asicdb, ip, expected=1): + def check_neigh_in_asic_db(self, asicdb, ip, expected=True): + rif_oid = self.get_vlan_rif_oid(asicdb) + switch_oid = self.get_switch_oid(asicdb) + neigh_key_map = { + "ip": ip, + "rif": rif_oid, + "switch_id": switch_oid + } + expected_key = json.dumps(neigh_key_map, sort_keys=True, separators=(',', ':')) - nbr = asicdb.wait_for_n_keys(self.ASIC_NEIGH_TABLE, expected) + if expected: + nbr_keys = asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, [expected_key]) - found = False - for key in nbr: - entry = json.loads(key) - if entry["ip"] == ip: - found = True - entry = key - break + for key in nbr_keys: + if ip in key: + return key - assert found - return entry + else: + asicdb.wait_for_deleted_keys(self.ASIC_NEIGH_TABLE, [expected_key]) + return '' def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): @@ -127,7 +197,6 @@ def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): assert tunnel_nh_id - def check_nexthop_in_asic_db(self, asicdb, key, standby=False): fvs = asicdb.get_entry(self.ASIC_ROUTE_TABLE, key) @@ -140,7 +209,6 @@ def check_nexthop_in_asic_db(self, asicdb, key, standby=False): else: assert (nhid != tunnel_nh_id) - def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", key) @@ -160,18 +228,19 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): # Count the number of Nexthop member pointing to tunnel if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] == tunnel_nh_id: - count += 1 + count += 1 assert num_tnl_nh == count - - def add_neighbor(self, dvs, ip, mac, v6=False): - - if v6: + def add_neighbor(self, dvs, ip, mac): + if ip_address(ip).version == 6: dvs.runcmd("ip -6 neigh replace " + ip + " lladdr " + 
mac + " dev Vlan1000") else: dvs.runcmd("ip -4 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") + def del_neighbor(self, dvs, ip): + cmd = 'ip neigh del {} dev {}'.format(ip, self.VLAN_1000) + dvs.runcmd(cmd) def add_fdb(self, dvs, port, mac): @@ -193,32 +262,29 @@ def del_fdb(self, dvs, mac): def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): - self.create_vlan_interface(confdb, asicdb, dvs) - - self.create_mux_cable(confdb) - self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "standby") self.add_neighbor(dvs, self.SERV1_IPV4, "00:00:00:00:00:01") - # Broadcast neigh 192.168.0.255 is default added. Hence +1 for expected number - srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4, 2) + srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4) - self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01", True) - srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6, 3) + self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01") + srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) self.add_neighbor(dvs, self.SERV1_SOC_IPV4, "00:00:00:00:00:01") - self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4, 4) + self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) existing_keys = asicdb.get_keys(self.ASIC_NEIGH_TABLE) self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") - self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02", True) + self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02") time.sleep(1) # In standby mode, the entry must not be added to Neigh table but Route asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, existing_keys) - dvs_route.check_asicdb_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) + dvs_route.check_asicdb_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) # The first standby route also creates as tunnel Nexthop self.check_tnl_nexthop_in_asic_db(asicdb, 4) @@ -228,17 +294,20 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v4) asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v6) - dvs_route.check_asicdb_route_entries([self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4, 2) + dvs_route.check_asicdb_route_entries( + [self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK] + ) + self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) dvs_route.check_asicdb_deleted_route_entries([self.SERV1_SOC_IPV4+self.IPV4_MASK]) # Change state to Active. 
This will add Neigh and delete Route self.set_mux_state(appdb, "Ethernet4", "active") - dvs_route.check_asicdb_deleted_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4, 4) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6, 4) - + dvs_route.check_asicdb_deleted_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): @@ -251,27 +320,27 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): ip_1 = "fc02:1000::10" ip_2 = "fc02:1000::11" - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11", True) - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11") + self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12") # ip_1 is on Active Mux, hence added to Host table - self.check_neigh_in_asic_db(asicdb, ip_1, 5) + self.check_neigh_in_asic_db(asicdb, ip_1) # ip_2 is on Standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_2+self.IPV6_MASK]) # Check ip_1 move to standby mux, should be pointing to tunnel - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12") # ip_1 moved to standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_1+self.IPV6_MASK]) # Check ip_2 move to active mux, should be host entry - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11", True) + self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11") # ip_2 moved to active Mux, hence remove from Route table dvs_route.check_asicdb_deleted_route_entries([ip_2+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, ip_2, 5) + self.check_neigh_in_asic_db(asicdb, ip_2) # Simulate FDB aging out test case ip_3 = "192.168.0.200" @@ -291,13 +360,18 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([ip_3+self.IPV4_MASK]) + self.del_fdb(dvs, "00-00-00-00-00-11") + def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") rtprefix = "2.3.4.0/24" - dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + " " + self.SERV1_IPV4 + "\"") + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + + " " + self.SERV1_IPV4 + "\"" + ) pdb = dvs.get_app_db() pdb.wait_for_entry("ROUTE_TABLE", rtprefix) @@ -350,7 +424,12 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), ("ifname", "Vlan1000,Vlan1000")]) + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), + ("ifname", "Vlan1000,Vlan1000") + ] + ) ps.set(rtprefix, fvs) @@ -388,7 +467,12 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")]) + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), + ("ifname", "tun0,tun0") + ] + ) ps.set(rtprefix, fvs) @@ -414,6 +498,7 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): 
self.set_mux_state(appdb, "Ethernet4", "standby") self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + ps._del(rtprefix) def get_expected_sai_qualifiers(self, portlist, dvs_acl): expected_sai_qualifiers = { @@ -423,12 +508,12 @@ def get_expected_sai_qualifiers(self, portlist, dvs_acl): return expected_sai_qualifiers - - def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): + def create_and_test_acl(self, appdb, dvs_acl): # Start with active, verify NO ACL rules exists self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "active") dvs_acl.verify_no_acl_rules() @@ -439,7 +524,7 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): # Set two mux ports to standby, verify ACL rule with inport bitmap (2 ports) self.set_mux_state(appdb, "Ethernet4", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Set one mux port to active, verify ACL rule with inport bitmap (1 port) @@ -458,7 +543,7 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): # Verify change while setting unknown from active self.set_mux_state(appdb, "Ethernet4", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) self.set_mux_state(appdb, "Ethernet0", "active") @@ -466,16 +551,15 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify no change while setting unknown from standby self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - - def create_and_test_metrics(self, appdb, statedb, dvs): + def create_and_test_metrics(self, appdb, statedb): # Set to active and test attributes for start and end time self.set_mux_state(appdb, "Ethernet0", "active") @@ -489,7 +573,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, _ in fvs.items(): if f == "orch_switch_active_start": start = True elif f == "orch_switch_active_end": @@ -511,7 +595,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, v in fvs.items(): if f == "orch_switch_standby_start": start = True elif f == "orch_switch_standby_end": @@ -520,26 +604,17 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert start assert end - def check_interface_exists_in_asicdb(self, asicdb, sai_oid): asicdb.wait_for_entry(self.ASIC_RIF_TABLE, sai_oid) return True - def check_vr_exists_in_asicdb(self, asicdb, sai_oid): 
asicdb.wait_for_entry(self.ASIC_VRF_TABLE, sai_oid) return True - - def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dscp_map_oid=None, tc_to_queue_map_oid=None): + def create_and_test_peer(self, asicdb, tc_to_dscp_map_oid=None, tc_to_queue_map_oid=None): """ Create PEER entry and verify all needed entries exist in ASIC DB """ - peer_attrs = { - "address_ipv4": peer_ip - } - - db.create_entry("PEER_SWITCH", peer_name, peer_attrs) - # check asic db table # There will be two tunnels, one P2MP and another P2P tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 2) @@ -569,9 +644,9 @@ if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == src_ip + assert value == self.SELF_IPV4 elif field == "SAI_TUNNEL_ATTR_ENCAP_DST_IP": - assert value == peer_ip + assert value == self.PEER_IPV4 elif field == "SAI_TUNNEL_ATTR_PEER_MODE": assert value == "SAI_TUNNEL_PEER_MODE_P2P" elif field == "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE": @@ -591,7 +666,6 @@ else: assert False, "Field %s is not tested" % field - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" @@ -617,33 +691,22 @@ else: assert False, "Field %s is not tested" % field - - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): + def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): """ Create tunnel and verify all needed entries exist in ASIC DB """ - is_symmetric_tunnel = "src_ip" in kwargs + is_symmetric_tunnel = "src_ip" in tunnel_params # 6 parameters to check in case of decap tunnel # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel expected_len = 7 if is_symmetric_tunnel else 6 - if 'decap_tc_to_pg_map_id' in kwargs: + if 'decap_tc_to_pg_map_id' in tunnel_params: expected_len += 1 - decap_tc_to_pg_map_id = kwargs.pop('decap_tc_to_pg_map_id') + decap_tc_to_pg_map_id = tunnel_params.pop('decap_tc_to_pg_map_id') - if 'decap_dscp_to_tc_map_id' in kwargs: + if 'decap_dscp_to_tc_map_id' in tunnel_params: expected_len += 1 - decap_dscp_to_tc_map_id = kwargs.pop('decap_dscp_to_tc_map_id') + decap_dscp_to_tc_map_id = tunnel_params.pop('decap_dscp_to_tc_map_id') - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - - fvs = create_fvs(**kwargs) - - ps.set(tunnel_name, fvs) - - # wait till config will be applied - time.sleep(1) # check asic db table tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 1) @@ -654,16 +717,16 @@ assert len(fvs) == expected_len - expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] - expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] - expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + expected_ecn_mode = self.ecn_modes_map[tunnel_params["ecn_mode"]] + expected_dscp_mode = self.dscp_modes_map[tunnel_params["dscp_mode"]] + expected_ttl_mode = self.ttl_modes_map[tunnel_params["ttl_mode"]] for field, value in fvs.items(): if 
field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == kwargs["src_ip"] + assert value == tunnel_params["src_ip"] elif field == "SAI_TUNNEL_ATTR_DECAP_ECN_MODE": assert value == expected_ecn_mode elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": @@ -680,10 +743,11 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert value == decap_tc_to_pg_map_id else: assert False, "Field %s is not tested" % field - src_ip = kwargs['src_ip'] if 'src_ip' in kwargs else None - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) + src_ip = tunnel_params['src_ip'] if 'src_ip' in tunnel_params else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, tunnel_params["dst_ip"].split(","), src_ip) + def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -697,7 +761,7 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): status, fvs = tunnel_table.get(tunnel_sai_obj) # get overlay loopback interface oid to check if it is deleted with the tunnel - overlay_infs_id = {f:v for f,v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] + overlay_infs_id = {f:v for f, v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) ps.set(tunnel_name, create_fvs(), 'DEL') @@ -710,6 +774,22 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def check_app_db_neigh_table( + self, appdb, intf, neigh_ip, + mac="00:00:00:00:00:00", expect_entry=True + ): + key = "{}:{}".format(intf, neigh_ip) + if isinstance(ip_address(neigh_ip), IPv4Address): + family = 'IPv4' + else: + family = 'IPv6' + + if expect_entry: + appdb.wait_for_matching_keys(self.APP_NEIGH_TABLE, [key]) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'family': family}) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'neigh': mac}) + else: + appdb.wait_for_deleted_keys(self.APP_NEIGH_TABLE, key) def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) # Apply QoS map to config db @@ -743,6 +823,162 @@ def cleanup_left_over(self, db, asicdb): for key in tunnel_app_table.getKeys(): tunnel_table._del(key) + def ping_ip(self, dvs, ip): + dvs.runcmd(self.PING_CMD.format(ip=ip)) + + def check_neighbor_state( + self, dvs, dvs_route, neigh_ip, expect_route=True, + expect_neigh=False, expected_mac='00:00:00:00:00:00' + ): + """ + Checks the status of neighbor entries in APPL and ASIC DB + """ + if expect_route and expect_neigh: + pytest.fail('expect_routes and expect_neigh cannot both be True') + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + prefix = str(ip_network(neigh_ip)) + self.check_app_db_neigh_table( + app_db, self.VLAN_1000, neigh_ip, + mac=expected_mac, expect_entry=expect_route + ) + if expect_route: + self.check_tnl_nexthop_in_asic_db(asic_db) + routes = dvs_route.check_asicdb_route_entries([prefix]) + for route in routes: + self.check_nexthop_in_asic_db(asic_db, route, standby=expect_route) + else: + dvs_route.check_asicdb_deleted_route_entries([prefix]) + self.check_neigh_in_asic_db(asic_db, neigh_ip, expected=expect_neigh) + + def execute_action(self, action, dvs, test_info): + if action in (PING_SERV, 
PING_NEIGH): + self.ping_ip(dvs, test_info[IP]) + elif action in (ACTIVE, STANDBY): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + self.set_mux_state(app_db_connector, test_info[INTF], action) + elif action == RESOLVE_ENTRY: + self.add_neighbor(dvs, test_info[IP], test_info[MAC]) + elif action == DELETE_ENTRY: + self.del_neighbor(dvs, test_info[IP]) + else: + pytest.fail('Invalid test action {}'.format(action)) + + @pytest.fixture(scope='module') + def setup_vlan(self, dvs): + self.create_vlan_interface(dvs) + + @pytest.fixture(scope='module') + def setup_mux_cable(self, dvs): + config_db = dvs.get_config_db() + self.create_mux_cable(config_db) + + @pytest.fixture(scope='module') + def setup_tunnel(self, dvs): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + ps = swsscommon.ProducerStateTable(app_db_connector, self.APP_TUNNEL_DECAP_TABLE_NAME) + fvs = create_fvs(**self.DEFAULT_TUNNEL_PARAMS) + ps.set(self.MUX_TUNNEL_0, fvs) + + @pytest.fixture + def setup_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_PEER_SWITCH, + self.PEER_SWITCH_HOST, + self.DEFAULT_PEER_SWITCH_PARAMS + ) + + @pytest.fixture + def remove_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.delete_entry(self.CONFIG_PEER_SWITCH, self.PEER_SWITCH_HOST) + + @pytest.fixture(params=['IPv4', 'IPv6']) + def ip_version(self, request): + return request.param + + def clear_neighbors(self, dvs): + _, neighs_str = dvs.runcmd('ip neigh show all') + neighs = [entry.split()[0] for entry in neighs_str.split('\n')[:-1]] + + for neigh in neighs: + self.del_neighbor(dvs, neigh) + + @pytest.fixture + def neighbor_cleanup(self, dvs): + """ + Ensures that all kernel neighbors are removed before and after tests + """ + self.clear_neighbors(dvs) + yield + self.clear_neighbors(dvs) + + @pytest.fixture + def server_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.SERV1_IPV4, self.SERV2_IPV4, self.SERV3_IPV4] + else: + return [self.SERV1_IPV6, self.SERV2_IPV6, self.SERV3_IPV6] + + @pytest.fixture + def neigh_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.NEIGH1_IPV4, self.NEIGH2_IPV4, self.NEIGH3_IPV4] + else: + return [self.NEIGH1_IPV6, self.NEIGH2_IPV6, self.NEIGH3_IPV6] + + @pytest.fixture + def ips_for_test(self, server_test_ips, neigh_test_ips, neigh_miss_test_sequence): + # Assumes that each test sequence has exactly one of + # PING_NEIGH or PING_SERV as a step + for step in neigh_miss_test_sequence: + if step[TEST_ACTION] == PING_SERV: + return server_test_ips + if step[TEST_ACTION] == PING_NEIGH: + return neigh_test_ips + + # If we got here, the test sequence did not contain a ping command + pytest.fail('No ping command found in test sequence {}'.format(neigh_miss_test_sequence)) + + @pytest.fixture + def ip_to_intf_map(self, server_test_ips, neigh_test_ips): + map = { + server_test_ips[0]: 'Ethernet0', + server_test_ips[1]: 'Ethernet4', + server_test_ips[2]: 'Ethernet8', + neigh_test_ips[0]: 'Ethernet0', + neigh_test_ips[1]: 'Ethernet4', + neigh_test_ips[2]: 'Ethernet8' + } + return map + + @pytest.fixture( + params=NEIGH_MISS_TESTS, + ids=['->'.join([step[TEST_ACTION] for step in scenario]) + for scenario in NEIGH_MISS_TESTS] + ) + def neigh_miss_test_sequence(self, request): + return request.param + + @pytest.fixture + def intf_fdb_map(self, dvs, setup_vlan): + """ + Note: this fixture invokes the setup_vlan fixture so that + the interfaces are 
brought up before attempting to access FDB information + """ + state_db = dvs.get_state_db() + keys = state_db.get_keys(self.STATE_FDB_TABLE) + + fdb_map = {} + for key in keys: + entry = state_db.get_entry(self.STATE_FDB_TABLE, key) + mac = key.replace('{}:'.format(self.VLAN_1000), '') + port = entry['port'] + fdb_map[port] = mac + + return fdb_map + class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel creation and removal """ @@ -765,78 +1001,109 @@ def setup(self, dvs): self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) - def test_Tunnel(self, dvs, testlog, setup): + def test_Tunnel(self, dvs, setup_tunnel, testlog, setup): """ test IPv4 Mux tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup + tunnel_params = self.DEFAULT_TUNNEL_PARAMS + tunnel_params["decap_dscp_to_tc_map_id"] = dscp_to_tc_map_oid + tunnel_params["decap_tc_to_pg_map_id"] = tc_to_pg_map_oid + # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - src_ip="10.1.0.33", dst_ip="10.1.0.32", dscp_mode="pipe", - ecn_mode="standard", ttl_mode="pipe", - encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, - encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME, - decap_dscp_to_tc_map=self.TUNNEL_QOS_MAP_NAME, - decap_dscp_to_tc_map_id = dscp_to_tc_map_oid, - decap_tc_to_pg_map=self.TUNNEL_QOS_MAP_NAME, - decap_tc_to_pg_map_id=tc_to_pg_map_oid) - - - def test_Peer(self, dvs, testlog, setup): + self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, tunnel_params) + + def test_Peer(self, dvs, setup_peer_switch, setup_tunnel, setup, testlog): + """ test IPv4 Mux tunnel creation """ - db = dvs.get_config_db() asicdb = dvs.get_asic_db() encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup - self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) + self.create_and_test_peer(asicdb, encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) - def test_Neighbor(self, dvs, dvs_route, testlog): + def test_Neighbor(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): """ test Neighbor entries and mux state change """ confdb = dvs.get_config_db() - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_neighbor(confdb, appdb, asicdb, dvs, dvs_route) - def test_Fdb(self, dvs, dvs_route, testlog): """ test Fdb entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_fdb(appdb, asicdb, dvs, dvs_route) - def test_Route(self, dvs, dvs_route, testlog): """ test Route entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_route(appdb, asicdb, dvs, dvs_route) - def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - asicdb = dvs.get_asic_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - self.create_and_test_acl(appdb, asicdb, 
dvs, dvs_acl) + self.create_and_test_acl(appdb, dvs_acl) def test_mux_metrics(self, dvs, testlog): """ test metrics for mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) statedb = dvs.get_state_db() - self.create_and_test_metrics(appdb, statedb, dvs) + self.create_and_test_metrics(appdb, statedb) + + def test_neighbor_miss( + self, dvs, dvs_route, ips_for_test, neigh_miss_test_sequence, + ip_to_intf_map, intf_fdb_map, neighbor_cleanup, setup_vlan, + setup_mux_cable, setup_tunnel, setup_peer_switch, testlog + ): + ip = ips_for_test[0] + intf = ip_to_intf_map[ip] + mac = intf_fdb_map[intf] + test_info = { + IP: ip, + INTF: intf, + MAC: mac + } + + for step in neigh_miss_test_sequence: + self.execute_action(step[TEST_ACTION], dvs, test_info) + exp_result = step[EXPECTED_RESULT] + self.check_neighbor_state( + dvs, dvs_route, ip, + expect_route=exp_result[EXPECT_ROUTE], + expect_neigh=exp_result[EXPECT_NEIGH], + expected_mac=mac if exp_result[REAL_MAC] else '00:00:00:00:00:00' + ) + + def test_neighbor_miss_no_peer( + self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, + remove_peer_switch, neighbor_cleanup, testlog + ): + """ + test neighbor miss with no peer switch configured + No new entries are expected in APPL_DB or ASIC_DB + """ + test_ips = [self.NEIGH3_IPV4, self.SERV3_IPV4, self.NEIGH1_IPV6, self.SERV1_IPV6] + + for ip in test_ips: + self.ping_ip(dvs, ip) + + for ip in test_ips: + self.check_neighbor_state(dvs, dvs_route, ip, expect_route=False) # Add Dummy always-pass test at end as workaround diff --git a/tests/test_nhg.py b/tests/test_nhg.py index 94d581b47cf4..aab088deb28d 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -128,7 +128,7 @@ def peer_ip(self, i): return "10.0.0." + str(i * 2 + 1) def port_mac(self, i): - return "00:00:00:00:00:0" + str(i) + return "00:00:00:00:00:0" + str(i + 1) def config_intf(self, i): fvs = {'NULL': 'NULL'} diff --git a/tests/test_port_config.py b/tests/test_port_config.py index d584899e9763..b6f51e4e860f 100644 --- a/tests/test_port_config.py +++ b/tests/test_port_config.py @@ -7,7 +7,7 @@ from dvslib.dvs_common import wait_for_result, PollingConfig -@pytest.yield_fixture +@pytest.fixture def port_config(request, dvs): file_name = "/usr/share/sonic/hwsku/port_config.ini" dvs.runcmd("cp %s %s.bak" % (file_name, file_name))
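For reference, each scenario in NEIGH_MISS_TESTS is parametrized under an id built by joining its action names, mirroring the `ids=` expression in the `neigh_miss_test_sequence` fixture above. A small sketch to list those ids, e.g. for `pytest -k` selection (assumes it is run from the tests/ directory so the import resolves):

    from mux_neigh_miss_tests import NEIGH_MISS_TESTS, TEST_ACTION

    # Rebuild the parametrization ids exactly as the fixture does.
    ids = ['->'.join(step[TEST_ACTION] for step in scenario)
           for scenario in NEIGH_MISS_TESTS]
    print('\n'.join(ids))
    # first entry: active->ping_serv->standby->resolve_entry->delete_entry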