From ef9dcdbec9b5344299a7bb0516a0377da5d0e793 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Thu, 30 May 2024 04:07:25 -0700 Subject: [PATCH 01/14] Handle learning duplicate IPs on different VRFs (#3165) - What I did Fixes sonic-net/sonic-buildimage#18890 If we try to learn an existing neighbor on a different VLAN in the same VRF, delete the old neighbor entry before creating the new one. For all other scenarios, proceed with neighbor learning normally. - Why I did it Allow learning the same IP in two different VRFs - How I verified it Run the C++ unit tests Signed-off-by: Lawrence Lee Co-authored-by: Prince Sunny --- orchagent/neighorch.cpp | 30 +++++- tests/mock_tests/mock_orch_test.h | 8 ++ tests/mock_tests/neighorch_ut.cpp | 164 +++++++++++++++++++++--------- 3 files changed, 149 insertions(+), 53 deletions(-) diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index a2bdebbc62..6e752ba09c 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -945,12 +945,36 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress NeighborEntry temp_entry = { ip_address, vlan_port }; if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) { - SWSS_LOG_NOTICE("Neighbor %s on %s already exists, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); - if (!removeNeighbor(temp_entry)) + // Neighbor already exists on another VLAN. If they belong to the same VRF, delete the old neighbor + Port existing_vlan, new_vlan; + if (!gPortsOrch->getPort(vlan_port, new_vlan)) { - SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + SWSS_LOG_ERROR("Failed to get port for %s", vlan_port.c_str()); return false; } + if (!gPortsOrch->getPort(alias, existing_vlan)) + { + SWSS_LOG_ERROR("Failed to get port for %s", alias.c_str()); + return false; + } + if (existing_vlan.m_vr_id == new_vlan.m_vr_id) + { + std::string vrf_name = gDirectory.get()->getVRFname(existing_vlan.m_vr_id); + if (vrf_name.empty()) + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); + } + else + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s in VRF %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str(), vrf_name.c_str()); + } + + if (!removeNeighbor(temp_entry)) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + return false; + } + } } } diff --git a/tests/mock_tests/mock_orch_test.h b/tests/mock_tests/mock_orch_test.h index f0e022a7bc..fe6d3a0e07 100644 --- a/tests/mock_tests/mock_orch_test.h +++ b/tests/mock_tests/mock_orch_test.h @@ -19,16 +19,24 @@ namespace mock_orch_test static const string PEER_IPV4_ADDRESS = "1.1.1.1"; static const string ACTIVE_INTERFACE = "Ethernet4"; static const string STANDBY_INTERFACE = "Ethernet8"; + static const string ETHERNET0 = "Ethernet0"; + static const string ETHERNET4 = "Ethernet4"; + static const string ETHERNET8 = "Ethernet8"; + static const string ETHERNET12 = "Ethernet12"; static const string ACTIVE_STATE = "active"; static const string STANDBY_STATE = "standby"; static const string STATE = "state"; static const string VLAN_1000 = "Vlan1000"; static const string VLAN_2000 = "Vlan2000"; + static const string VLAN_3000 = "Vlan3000"; + static const string VLAN_4000 = "Vlan4000"; static const string SERVER_IP1 = "192.168.0.2"; static const string SERVER_IP2 = 
"192.168.0.3"; static const string MAC1 = "62:f9:65:10:2f:01"; static const string MAC2 = "62:f9:65:10:2f:02"; static const string MAC3 = "62:f9:65:10:2f:03"; + static const string MAC4 = "62:f9:65:10:2f:04"; + static const string MAC5 = "62:f9:65:10:2f:05"; class MockOrchTest: public ::testing::Test { diff --git a/tests/mock_tests/neighorch_ut.cpp b/tests/mock_tests/neighorch_ut.cpp index d82e10d987..13e4ead4b0 100644 --- a/tests/mock_tests/neighorch_ut.cpp +++ b/tests/mock_tests/neighorch_ut.cpp @@ -9,7 +9,6 @@ #include "mock_sai_api.h" #include "mock_orch_test.h" - EXTERN_MOCK_FNS namespace neighorch_test @@ -21,15 +20,18 @@ namespace neighorch_test using ::testing::Throw; static const string TEST_IP = "10.10.10.10"; - static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); + static const string VRF_3000 = "Vrf3000"; + static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); static const NeighborEntry VLAN2000_NEIGH = NeighborEntry(TEST_IP, VLAN_2000); + static const NeighborEntry VLAN3000_NEIGH = NeighborEntry(TEST_IP, VLAN_3000); + static const NeighborEntry VLAN4000_NEIGH = NeighborEntry(TEST_IP, VLAN_4000); - class NeighOrchTest: public MockOrchTest + class NeighOrchTest : public MockOrchTest { protected: void SetAndAssertMuxState(std::string interface, std::string state) { - MuxCable* muxCable = m_MuxOrch->getMuxCable(interface); + MuxCable *muxCable = m_MuxOrch->getMuxCable(interface); muxCable->setState(state); EXPECT_EQ(state, muxCable->getState()); } @@ -46,35 +48,49 @@ namespace neighorch_test void ApplyInitialConfigs() { - Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); - Table decap_tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); - Table decap_term_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TERM_TABLE_NAME); - Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); Table fdb_table = Table(m_app_db.get(), APP_FDB_TABLE_NAME); + Table vrf_table = Table(m_app_db.get(), APP_VRF_TABLE_NAME); auto ports = ut_helper::getInitialSaiPorts(); - port_table.set(ACTIVE_INTERFACE, ports[ACTIVE_INTERFACE]); - port_table.set(STANDBY_INTERFACE, ports[STANDBY_INTERFACE]); + port_table.set(ETHERNET0, ports[ETHERNET0]); + port_table.set(ETHERNET4, ports[ETHERNET4]); + port_table.set(ETHERNET8, ports[ETHERNET8]); port_table.set("PortConfigDone", { { "count", to_string(1) } }); port_table.set("PortInitDone", { {} }); + vrf_table.set(VRF_3000, { {"NULL", "NULL"} }); + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, { "mtu", "9100" }, { "mac", "00:aa:bb:cc:dd:ee" } }); - vlan_table.set(VLAN_2000, { { "admin_status", "up"}, + vlan_table.set(VLAN_2000, { { "admin_status", "up" }, { "mtu", "9100" }, { "mac", "aa:11:bb:22:cc:33" } }); + vlan_table.set(VLAN_3000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_table.set(VLAN_4000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + ETHERNET0, + { { "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_2000 + 
vlan_member_table.getTableNameSeparator() + ETHERNET4, + { { "tagging_mode", "untagged" } }); + vlan_member_table.set( - VLAN_1000 + vlan_member_table.getTableNameSeparator() + ACTIVE_INTERFACE, + VLAN_3000 + vlan_member_table.getTableNameSeparator() + ETHERNET8, { { "tagging_mode", "untagged" } }); vlan_member_table.set( - VLAN_2000 + vlan_member_table.getTableNameSeparator() + STANDBY_INTERFACE, + VLAN_4000 + vlan_member_table.getTableNameSeparator() + ETHERNET12, { { "tagging_mode", "untagged" } }); intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, @@ -85,6 +101,16 @@ namespace neighorch_test { "proxy_arp", "enabled" }, { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set(VLAN_3000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_4000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set( VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/24", { { "scope", "global" }, @@ -96,65 +122,56 @@ namespace neighorch_test { "scope", "global" }, { "family", "IPv4" }, }); - decap_term_table.set( - MUX_TUNNEL + neigh_table.getTableNameSeparator() + "2.2.2.2", { { "src_ip", "1.1.1.1" }, - { "term_type", "P2P" } }); - - decap_tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, - { "src_ip", "1.1.1.1" }, - { "ecn_mode", "copy_from_outer" }, - { "encap_ecn_mode", "standard" }, - { "ttl_mode", "pipe" }, - { "tunnel_type", "IPINIP" } }); - - peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); - - mux_cable_table.set(ACTIVE_INTERFACE, { { "server_ipv4", SERVER_IP1 + "/32" }, - { "server_ipv6", "a::a/128" }, - { "state", "auto" } }); + intf_table.set( + VLAN_3000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); - mux_cable_table.set(STANDBY_INTERFACE, { { "server_ipv4", SERVER_IP2+ "/32" }, - { "server_ipv6", "a::b/128" }, - { "state", "auto" } }); + intf_table.set( + VLAN_4000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); gPortsOrch->addExistingData(&port_table); gPortsOrch->addExistingData(&vlan_table); gPortsOrch->addExistingData(&vlan_member_table); static_cast(gPortsOrch)->doTask(); + gVrfOrch->addExistingData(&vrf_table); + static_cast(gVrfOrch)->doTask(); + gIntfsOrch->addExistingData(&intf_table); static_cast(gIntfsOrch)->doTask(); - m_TunnelDecapOrch->addExistingData(&decap_tunnel_table); - m_TunnelDecapOrch->addExistingData(&decap_term_table); - static_cast(m_TunnelDecapOrch)->doTask(); - - m_MuxOrch->addExistingData(&peer_switch_table); - static_cast(m_MuxOrch)->doTask(); - - m_MuxOrch->addExistingData(&mux_cable_table); - static_cast(m_MuxOrch)->doTask(); - fdb_table.set( VLAN_1000 + fdb_table.getTableNameSeparator() + MAC1, { { "type", "dynamic" }, - { "port", ACTIVE_INTERFACE } }); + { "port", ETHERNET0 } }); fdb_table.set( VLAN_2000 + fdb_table.getTableNameSeparator() + MAC2, { { "type", "dynamic" }, - { "port", STANDBY_INTERFACE} }); + { "port", ETHERNET4 } }); fdb_table.set( VLAN_1000 + fdb_table.getTableNameSeparator() + MAC3, { { "type", "dynamic" }, - { "port", ACTIVE_INTERFACE} }); + { "port", ETHERNET0 } }); + + fdb_table.set( + VLAN_3000 + fdb_table.getTableNameSeparator() + MAC4, + { { "type", "dynamic" }, + { "port", ETHERNET8 } }); + + fdb_table.set( + VLAN_4000 + 
fdb_table.getTableNameSeparator() + MAC5, + { { "type", "dynamic" }, + { "port", ETHERNET12 } }); gFdbOrch->addExistingData(&fdb_table); static_cast(gFdbOrch)->doTask(); - - SetAndAssertMuxState(ACTIVE_INTERFACE, ACTIVE_STATE); - SetAndAssertMuxState(STANDBY_INTERFACE, STANDBY_STATE); } void PostSetUp() override @@ -169,18 +186,19 @@ namespace neighorch_test } }; - TEST_F(NeighOrchTest, MultiVlanIpLearning) + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighbor) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_1000, TEST_IP, MAC1); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_2000, TEST_IP, MAC2); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 0); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 1); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_1000, TEST_IP, MAC3); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); @@ -201,4 +219,50 @@ namespace neighorch_test ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 0); } + + TEST_F(NeighOrchTest, MultiVlanDifferentVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, MultiVlanSameVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_4000, TEST_IP, MAC5); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 0); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN4000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighborMissingExistingVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_1000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighborMissingNewVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_2000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } } From 9ffbcd5892653e12873dc58efd6a6d9bf5d9fc1b Mon Sep 17 00:00:00 2001 From: siqbal1986 Date: Thu, 30 May 2024 08:58:09 -0700 Subject: [PATCH 02/14] Added support for "UNDERLAY_SET_DSCP" and "UNDERLAY_SET_DSCPV6" tables (#3145) What I did This PR adds the logical table UNDERLAY_SET_DSCP and UNDERLAY_SET_DSCPV6. 
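As an illustration of the intended usage, here is a minimal sketch of how a table and rule of this type are configured from a virtual-switch test, using the dvs_acl helpers added later in this patch (the table, rule, port and qualifier values are examples only, not part of the change):

    # Sketch only: assumes the standard dvs_acl fixture used by the tests below.
    dvs_acl.create_acl_table("UNDERLAY_MARK", "UNDERLAY_SET_DSCP", ["Ethernet0", "Ethernet4"])
    # Match an IPv4 L4 flow and rewrite the outer DSCP to 12 when the packet egresses.
    dvs_acl.create_dscp_acl_rule(
        "UNDERLAY_MARK", "RULE_1",
        {"SRC_IP": "10.0.0.1/32", "DST_IP": "20.0.0.1/32", "L4_DST_PORT": "443"},
        action="12")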
This feature allows SONIC to match an ingressing L4 packet and change the DSCP field of the outer header when the packet is egressing. These tables are only created on Cisco-8000, Mlnx and VS platforms. Why I did it The match set for these tables are following. The action for both of these tables is SET_DSCP UNDERLAY_SET_DSCP SRC_IP DST_IP IP_PROTOCOL L4_SRC_PORT L4_DST_PORT TCP_FLAGS DSCP UNDERLAY_SET_DSCPV6 SRC_IPV6 DST_IPV6 IPV6_NEXT_HEADER L4_SRC_PORT L4_DST_PORT DSCP Note: These tables are not created but only their names are used. Since we require matching based on L4 parameters, all the mentioned match attributes are necessary. Merging these into a single table would result in a larger TCAM footprint which may be impact existing ACL usage scenarios. --- orchagent/aclorch.cpp | 647 ++++++++++++++++++++++++++++++++++++++-- orchagent/aclorch.h | 65 +++- orchagent/acltable.h | 6 +- tests/dvslib/dvs_acl.py | 27 +- tests/test_acl_mark.py | 447 +++++++++++++++++++++++++++ 5 files changed, 1169 insertions(+), 23 deletions(-) create mode 100644 tests/test_acl_mark.py diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 5ad908f082..fd20a212b1 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -11,7 +11,6 @@ #include "timer.h" #include "crmorch.h" #include "sai_serialize.h" - using namespace std; using namespace swss; @@ -33,6 +32,10 @@ extern string gMySwitchType; #define MIN_VLAN_ID 1 // 0 is a reserved VLAN ID #define MAX_VLAN_ID 4095 // 4096 is a reserved VLAN ID +#define METADATA_VALUE_START 1 +#define METADATA_VALUE_END 7 +#define METADATA_VALUE_INVALID 8 + #define STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY "is_action_list_mandatory" #define STATE_DB_ACL_ACTION_FIELD_ACTION_LIST "action_list" #define STATE_DB_ACL_L3V4V6_SUPPORTED "supported_L3V4V6" @@ -41,6 +44,8 @@ extern string gMySwitchType; #define ACL_COUNTER_DEFAULT_POLLING_INTERVAL_MS 10000 // ms #define ACL_COUNTER_DEFAULT_ENABLED_STATE false +#define EGR_SET_DSCP_TABLE_ID "EgressSetDSCP" + const int TCP_PROTOCOL_NUM = 6; // TCP protocol number acl_rule_attr_lookup_t aclMatchLookup = @@ -73,7 +78,8 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, - { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME}, + { MATCH_METADATA, SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -122,6 +128,12 @@ static acl_packet_action_lookup_t aclPacketActionLookup = { PACKET_ACTION_DROP, SAI_PACKET_ACTION_DROP }, }; +static acl_rule_attr_lookup_t aclMetadataDscpActionLookup = +{ + { ACTION_META_DATA, SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA}, + { ACTION_DSCP, SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP} +}; + static acl_dtel_flow_op_type_lookup_t aclDTelFlowOpTypeLookup = { { DTEL_FLOW_OP_NOP, SAI_ACL_DTEL_FLOW_OP_NOP }, @@ -349,6 +361,42 @@ static acl_table_action_list_lookup_t defaultAclActionList = } } } + }, + { + // MARK_META + TABLE_TYPE_MARK_META, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA + } + } + } + }, + { + // MARK_METAV6 + TABLE_TYPE_MARK_META_V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA + } + } + } + }, + { + // EGR_SET_DSCP + TABLE_TYPE_EGR_SET_DSCP, + { + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_SET_DSCP + } + } + } } }; @@ 
-414,6 +462,18 @@ static acl_table_match_field_lookup_t stageMandatoryMatchFields = } } } + }, + { + // EGR_SET_DSCP + TABLE_TYPE_EGR_SET_DSCP, + { + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META + } + } + } } }; @@ -706,7 +766,7 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT auto mirrorAction = aclMirrorStageLookup.find(action); auto dtelAction = aclDTelActionLookup.find(action); auto otherAction = aclOtherActionLookup.find(action); - + auto metadataAction = aclMetadataDscpActionLookup.find(action); if (l3Action != aclL3ActionLookup.end()) { saiActionAttr = l3Action->second; @@ -723,6 +783,10 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT { saiActionAttr = otherAction->second; } + else if (metadataAction != aclMetadataDscpActionLookup.end()) + { + saiActionAttr = metadataAction->second; + } else { SWSS_LOG_ERROR("Unknown action %s", action.c_str()); @@ -1032,6 +1096,17 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) return false; } } + else if (attr_name == MATCH_METADATA) + { + matchData.data.u8 = to_uint(attr_value); + matchData.mask.u8 = 0xFF; + // value must be between METADATA_VALUE_START and METADATA_VALUE_END inclusive. + if (matchData.data.u8 < METADATA_VALUE_START || matchData.data.u8 > METADATA_VALUE_END) + { + SWSS_LOG_ERROR("Invalid MATCH_METADATA configuration: %s, expected value between 1-7.", attr_value.c_str()); + return false; + } + } } catch (exception &e) { @@ -1599,7 +1674,7 @@ bool AclRule::getCreateCounter() const return m_createCounter; } -shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data) +shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data, MetaDataMgr * m_metadataMgr) { shared_ptr aclRule; @@ -1615,6 +1690,10 @@ shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOr { return make_shared(acl, rule, table); } + else if (acl->isUsingEgrSetDscp(table) || table == EGR_SET_DSCP_TABLE_ID) + { + return make_shared(acl, rule, table, m_metadataMgr); + } else if (aclDTelActionLookup.find(action) != aclDTelActionLookup.cend()) { if (!dtel) @@ -2142,6 +2221,100 @@ void AclRuleMirror::onUpdate(SubjectType type, void *cntx) } } +AclRuleUnderlaySetDscp::AclRuleUnderlaySetDscp(AclOrch *aclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter): + AclRule(aclOrch, rule, table, createCounter), + table_id(table), + m_metaDataMgr(m_metaDataMgr) +{ +} + +uint32_t AclRuleUnderlaySetDscp::getDscpValue() const +{ + return cachedDscpValue; +} + +uint32_t AclRuleUnderlaySetDscp::getMetadata() const +{ + return cachedMetadata; +} + +bool AclRuleUnderlaySetDscp::validateAddAction(string attr_name, string _attr_value) +{ + SWSS_LOG_ENTER(); + + string attr_value = to_upper(_attr_value); + + sai_object_id_t table_oid = m_pAclOrch->getTableById(table_id); + auto aclTable = m_pAclOrch->getTableByOid(table_oid); + string type = aclTable->type.getName(); + string key = table_id + ":" + m_id; + // we handle the allocation of metadata for here. based on SET_DSCP action, we check if a metadata is already allocated then we reuse it + // otherwise we allocate a new metadata. This metadata is then set an the action for the Rule of this table. 
We also cache the SET_DSCP + // value and the allocated metadata in a the rule structure itself so that when we go to addRule we can use these to add the + // egr_set_dscp rule + if (attr_name == ACTION_DSCP && (type == TABLE_TYPE_MARK_META || type == TABLE_TYPE_MARK_META_V6)) + { + if (!m_pAclOrch->isUsingEgrSetDscp(table_id)) + { + + SWSS_LOG_ERROR("Unexpected Error. Table %s not asssociated with EGR_SET_DSCP table", table_id.c_str()); + return false; + } + + u_int8_t actionDscpValue = uint8_t(std::stoi(attr_value)); + cachedDscpValue = actionDscpValue; + auto metadata = m_metaDataMgr->getFreeMetaData(actionDscpValue); + + if (metadata == METADATA_VALUE_INVALID) + { + SWSS_LOG_ERROR("Failed to get free metadata for DSCP value %d", actionDscpValue); + return false; + } + cachedMetadata = metadata; + attr_name = ACTION_META_DATA; + attr_value = std::to_string(metadata); + m_pAclOrch->addMetaDataRef(key, metadata); + } + + + sai_acl_action_data_t actionData; + actionData.parameter.u32 = 0; + + SWSS_LOG_INFO("attr_name: %s, attr_value: %s int val %d", attr_name.c_str(), attr_value.c_str(), to_uint(attr_value)); + // we only handle DSCP and META_DATA actions for now. + if (attr_name == ACTION_DSCP || attr_name == ACTION_META_DATA) + { + actionData.parameter.u32 = to_uint(attr_value); + if (attr_name == ACTION_META_DATA && (actionData.parameter.u32 < METADATA_VALUE_START || actionData.parameter.u32 > METADATA_VALUE_END)) + { + return false; + } + } + else + { + return false; + } + + actionData.enable = true; + return setAction(aclMetadataDscpActionLookup[attr_name], actionData); +} + +bool AclRuleUnderlaySetDscp::validate() +{ + SWSS_LOG_ENTER(); + if ( m_actions.size() != 1) + { + return false; + } + + return true; +} + +void AclRuleUnderlaySetDscp::onUpdate(SubjectType, void *) +{ + // Do nothing +} + AclTable::AclTable(AclOrch *pAclOrch, string id) noexcept : m_pAclOrch(pAclOrch), id(id) { @@ -3404,6 +3577,40 @@ void AclOrch::initDefaultTableTypes() ); } + addAclTableType( + builder.withName(TABLE_TYPE_MARK_META) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .build() + ); + + addAclTableType( + builder.withName(TABLE_TYPE_MARK_META_V6) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .build() + ); + + addAclTableType( + builder.withName(TABLE_TYPE_EGR_SET_DSCP) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META)) + .build() + ); // Placeholder for control plane tables addAclTableType(builder.withName(TABLE_TYPE_CTRLPLANE).build()); } @@ -3502,7 +3709,7 @@ void 
AclOrch::putAclActionCapabilityInDB(acl_stage_type_t stage) ostringstream acl_action_value_stream; ostringstream is_action_list_mandatory_stream; - for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup}) + for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup, aclMetadataDscpActionLookup}) { for (const auto& it: action_map) { @@ -3742,6 +3949,26 @@ void AclOrch::getAddDeletePorts(AclTable &newT, newPortSet.insert(p); } + // if the table type is TABLE_TYPE_EGR_SET_DSCP we use a single instance of this + // table with all the tables of type TABLE_TYPE_MARK_META/v6 therefoere we need to + // to collect all the ports from the tables of type TABLE_TYPE_MARK_META/v6 and + // put them in the newPortSet. + if (curT.id == EGR_SET_DSCP_TABLE_ID) + { + for(auto iter : m_egrSetDscpRef) + { + auto tableOid = getTableById(iter); + auto existingtable = m_AclTables.at(tableOid); + for (auto p : existingtable.pendingPortSet) + { + newPortSet.insert(p); + } + for (auto p : existingtable.portSet) + { + newPortSet.insert(p); + } + } + } // Collect current ports for (auto p : curT.pendingPortSet) { @@ -3848,6 +4075,176 @@ bool AclOrch::updateAclTable(AclTable ¤tTable, AclTable &newTable) return true; } +EgressSetDscpTableStatus AclOrch::addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName) +{ + SWSS_LOG_ENTER(); + EgressSetDscpTableStatus status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED; + AclTable egrSetDscpTable(this); + // we only add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 + // otherwise we return EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. + if (orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP || orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) + { + + AclTable egrSetDscpTable(this); + + // copy ports from the TABLE_TYPE_UNDERLAY_SET_DSCP/v6 to the egrSetDscpTable. + std::set ports; + ports.insert(table.portSet.begin(), table.portSet.end()); + ports.insert(table.pendingPortSet.begin(), table.pendingPortSet.end()); + for (auto alias : ports) + { + Port port; + if (!gPortsOrch->getPort(alias, port)) + { + SWSS_LOG_INFO("Add unready port %s to pending list for ACL table %s", + alias.c_str(), EGR_SET_DSCP_TABLE_ID); + egrSetDscpTable.pendingPortSet.emplace(alias); + continue; + } + + sai_object_id_t bind_port_id; + if (!getAclBindPortId(port, bind_port_id)) + { + SWSS_LOG_ERROR("Failed to get port %s bind port ID for ACL table %s", + alias.c_str(), EGR_SET_DSCP_TABLE_ID); + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + egrSetDscpTable.link(bind_port_id); + egrSetDscpTable.portSet.emplace(alias); + } + + egrSetDscpTable.id = EGR_SET_DSCP_TABLE_ID; + egrSetDscpTable.stage = ACL_STAGE_EGRESS; + auto egrSetDscpTableType = getAclTableType(TABLE_TYPE_EGR_SET_DSCP); + sai_object_id_t egrSetDscp_oid = getTableById(EGR_SET_DSCP_TABLE_ID); + // create the EGR_SET_DSCP fisrt time if not present. Otherwise update the existing table. + if (m_egrSetDscpRef.empty()) + { + // Create EGR_SET_DSCP table + egrSetDscpTable.validateAddType(*egrSetDscpTableType); + egrSetDscpTable.addMandatoryActions(); + if (!egrSetDscpTable.validate()) + { + SWSS_LOG_ERROR("Failed to validate ACL table %s", + EGR_SET_DSCP_TABLE_ID); + // since we failed to create the table, there is no need for rollback. 
+ return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED;; + } + if (!addAclTable(egrSetDscpTable)) + { + SWSS_LOG_ERROR("Failed to create ACL table EgressSetDSCP"); + // since we failed to create the table, there is no need for rollback. + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + } + else + { + if (updateAclTable(m_AclTables[egrSetDscp_oid], egrSetDscpTable)) + { + SWSS_LOG_INFO("Successfully updated existing ACL table EgressSetDSCP"); + // We do not set the status here as we still have to update + // TABLE_TYPE_MARK_META/V6 table. + } + else + { + SWSS_LOG_ERROR("Failed to update existing ACL table EgressSetDSCP"); + // there is no need for roollback as we have not made any changes to the MARK_META/V6 tables. + // We can simply return false. + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + } + // keep track of the fact that this table is now associated with the EGR_SET_DSCP table. + m_egrSetDscpRef.insert(table_id); + SWSS_LOG_INFO("Added ACL table %s to EgrSetDscpRef", table_id.c_str()); + status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS; + + } + return status; +} + +bool AclOrch::removeEgrSetDscpTable(string table_id) +{ + m_egrSetDscpRef.erase(table_id); + if (m_egrSetDscpRef.size() == 0) + { + if (!removeAclTable(EGR_SET_DSCP_TABLE_ID)) + { + SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); + return false; + } + } + else + { + //create a dummy table with no ports. The updateAclTable will remove the + // unique ports which were associated with table_id. + // The way this works is as follows. + // The getAddDeletePorts function collects all the ports of the tables which + // are in m_egrSetDscpRef set and adds those ports to the EGR_SET_DSCP. + // As a result the EGR_SET_DSCP is associated with all the ports to which the + // TABLE_TYPE_UNDERLAY_SET_DSCP/V6 tables are attached. + // + // when we want to remove one of the tables referencing the EGR_SET_DSCP. + // we remove it from m_egrSetDscpRef, then send a updateAclTable with a + // EGR_SET_DSCP table with no assiciated ports. + // The getAddDeletePorts collects all the ports except for the one assocated + // with the table we just removed from m_egrSetDscpRef and updated the EGR_SET_DSCP + // with new port set. + AclTable dummyTable(this); + dummyTable.id = EGR_SET_DSCP_TABLE_ID; + dummyTable.stage = ACL_STAGE_EGRESS; + if (updateAclTable(EGR_SET_DSCP_TABLE_ID, dummyTable, "")) + { + SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); + return false; + } + } + return true; +} + +bool AclOrch::addEgrSetDscpRule(string key, string dscpAction) +{ + auto metadata = m_egrDscpRuleMetadata[key]; + + if (m_metadataEgrDscpRule[metadata].size() == 1) + { + // Create EGR_SET_DSCP rule. set the match criteria to metadata value and action to dscpAction. 
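+            // Only the first rule that maps to this metadata value creates the shared EgressSetDSCP entry;
+            // later rules that resolve to the same DSCP reuse it via the metadata reference count.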
+ auto egrSetDscpRule = make_shared(this, std::to_string(metadata), EGR_SET_DSCP_TABLE_ID, &m_metaDataMgr); + egrSetDscpRule->validateAddMatch(MATCH_METADATA, std::to_string(metadata)); + egrSetDscpRule->validateAddAction(ACTION_DSCP, dscpAction); + + if (egrSetDscpRule->validate()) + { + if (!addAclRule(egrSetDscpRule, EGR_SET_DSCP_TABLE_ID)) + { + SWSS_LOG_ERROR("Failed to create ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); + return false; + } + } + else + { + SWSS_LOG_ERROR("Failed to validate ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); + return false; + } + } + return true; +} + +bool AclOrch::removeEgrSetDscpRule(string key) +{ + auto metadata = m_egrDscpRuleMetadata[key]; + if (getMetaDataRefCount(metadata) == 1) + { + if(!removeAclRule(EGR_SET_DSCP_TABLE_ID, std::to_string(metadata))) + { + SWSS_LOG_ERROR("Failed to remove ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); + return false; + } + } + removeMetaDataRef(key, metadata); + m_metaDataMgr.recycleMetaData(metadata); + return true; +} + bool AclOrch::updateAclTable(string table_id, AclTable &table) { SWSS_LOG_ENTER(); @@ -3867,6 +4264,29 @@ bool AclOrch::updateAclTable(string table_id, AclTable &table) return true; } +bool AclOrch::updateAclTable(string table_id, AclTable &table, string orignalTableTypeName) +{ + SWSS_LOG_ENTER(); + // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 + // for other tables it simply retuns EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED + EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, table, orignalTableTypeName); + bool status = false; + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED) + { + return false; + } + status = updateAclTable(table_id,table); + // if we have not updated the EGR_SET_DSCP, we simply need to return the status. + // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update + // of the MARK_META table failed. + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) + { + // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. + SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); + removeEgrSetDscpTable(table_id); + } + return status; +} bool AclOrch::addAclTable(AclTable &newTable) { @@ -3983,6 +4403,30 @@ bool AclOrch::addAclTable(AclTable &newTable) } } +bool AclOrch::addAclTable(string table_id, AclTable &newTable, string orignalTableTypeName) +{ + SWSS_LOG_ENTER(); + // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP + // or TABLE_TYPE_UNDERLAY_SET_DSCPV6. For other tables it simply retuns EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. + EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, newTable, orignalTableTypeName); + bool status = false; + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED) + { + return false; + } + status = addAclTable(newTable); + // if we have not updated the EGR_SET_DSCP, we simply need to return the status. + // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update + // of the MARK_META table failed. 
+ if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) + { + // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. + SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); + removeEgrSetDscpTable(table_id); + } + return status; +} + bool AclOrch::removeAclTable(string table_id) { SWSS_LOG_ENTER(); @@ -4041,6 +4485,20 @@ bool AclOrch::removeAclTable(string table_id) } } +bool AclOrch::removeAclTableWithEgrDscp(string table_id) +{ + SWSS_LOG_ENTER(); + bool egrSetDscpStatus = true; + if(m_egrSetDscpRef.find(table_id) != m_egrSetDscpRef.end()) + { + egrSetDscpStatus = removeEgrSetDscpTable(table_id); + } + if (!egrSetDscpStatus) + { + return false; + } + return removeAclTable(table_id); +} bool AclOrch::addAclTableType(const AclTableType& tableType) { SWSS_LOG_ENTER(); @@ -4101,6 +4559,34 @@ bool AclOrch::addAclRule(shared_ptr newRule, string table_id) return true; } +bool AclOrch::addAclRuleWithEgrSetDscp(shared_ptr newRule, string table_id) +{ + SWSS_LOG_ENTER(); + bool needsEgrSetDscp = false; + string key = table_id + ":" + newRule->getId(); + // if the table is using EGR_SET_DSCP, we need to add the EGR_SET_DSCP rule. + if (isUsingEgrSetDscp(table_id)) + { + needsEgrSetDscp = true; + string dscpAction = std::to_string(std::static_pointer_cast(newRule)->getDscpValue()); + if (!addEgrSetDscpRule(key, dscpAction)) + { + SWSS_LOG_ERROR("Failed to add Egress Set Dscp rule for Rule %s in table %s.", + newRule->getId().c_str(), table_id.c_str()); + return false; + } + } + // add the regular rule. + bool status = addAclRule(newRule, table_id); + if(!status && needsEgrSetDscp) + { + removeEgrSetDscpRule(key); + return false; + } + + return status; +} + bool AclOrch::removeAclRule(string table_id, string rule_id) { sai_object_id_t table_oid = getTableById(table_id); @@ -4126,6 +4612,19 @@ bool AclOrch::removeAclRule(string table_id, string rule_id) return m_AclTables[table_oid].remove(rule_id); } +bool AclOrch::removeAclRuleWithEgrSetDscp(string table_id, string rule_id) +{ + string key = table_id + ":" + rule_id; + if (m_egrDscpRuleMetadata.find(key) != m_egrDscpRuleMetadata.end()) + { + if (!removeEgrSetDscpRule(key)) + { + return false; + } + } + return removeAclRule(table_id, rule_id); +} + AclRule* AclOrch::getAclRule(string table_id, string rule_id) { sai_object_id_t table_oid = getTableById(table_id); @@ -4359,6 +4858,56 @@ bool AclOrch::isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_ac return it->second.find(param.s32) != it->second.cend(); } +bool AclOrch::isUsingEgrSetDscp(const string& table) const +{ + if (m_egrSetDscpRef.find(table) != m_egrSetDscpRef.end()) + { + return true; + } + return false; +} + +string AclOrch::translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const +{ + // The TABLE_TYPE_UNDERLAY_SET_DSCP/V6 is translated to table translates into TABLE_TYPE_MARK_META/V6 + if (tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP) + { + return TABLE_TYPE_MARK_META; + } + else if(tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) + { + return TABLE_TYPE_MARK_META_V6; + } + return tableTypeName; +} + +void AclOrch::addMetaDataRef(string key, uint8_t metadata) +{ + m_egrDscpRuleMetadata[key] = metadata; + if (m_metadataEgrDscpRule.find(metadata) == m_metadataEgrDscpRule.end()) + { + m_metadataEgrDscpRule[metadata] = set(); + } + m_metadataEgrDscpRule[metadata].insert(key); + +} + +void 
AclOrch::removeMetaDataRef(string key, uint8_t metadata) +{ + m_metadataEgrDscpRule[metadata].erase(key); + m_egrDscpRuleMetadata.erase(key); +} + +uint32_t AclOrch::getMetaDataRefCount(uint8_t metadata) +{ + if (m_metadataEgrDscpRule.find(metadata) != m_metadataEgrDscpRule.end()) + { + return uint32_t(m_metadataEgrDscpRule[metadata].size()); + } + return 0; +} + + void AclOrch::doAclTableTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4435,6 +4984,14 @@ void AclOrch::doAclTableTask(Consumer &consumer) break; } } + // For the case of Table type TABLE_TYPE_UNDERLAY_SET_DSCP/V6 we need to translate + // it to TABLE_TYPE_MARK_META/V6. We retain the original table type name in orignalTableTypeName + // and pass it ot the updateAclTable/ addAclTable functions. There based on the orignalTableTypeName + // we create/update the EgrSetDscp table. + string firstTableTypeName; + string unused; + string orignalTableTypeName = tableTypeName; + tableTypeName = translateUnderlaySetDscpTableTypeName(tableTypeName); auto tableType = getAclTableType(tableTypeName); if (!tableType) @@ -4461,7 +5018,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) m_AclTables[table_oid])) { // Update the existing table using the info in newTable - if (updateAclTable(m_AclTables[table_oid], newTable)) + if (updateAclTable(table_id, newTable, orignalTableTypeName)) { SWSS_LOG_NOTICE("Successfully updated existing ACL table %s", table_id.c_str()); @@ -4478,7 +5035,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { - if (addAclTable(newTable)) + if (addAclTable(table_id, newTable, orignalTableTypeName)) { // Mark ACL table as ACTIVE setAclTableStatus(table_id, AclObjectStatus::ACTIVE); @@ -4486,6 +5043,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { + //we have failed to create the MarkMeta table, we need to remove the EgrSetDscp table setAclTableStatus(table_id, AclObjectStatus::PENDING_CREATION); it++; } @@ -4502,7 +5060,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else if (op == DEL_COMMAND) { - if (removeAclTable(table_id)) + if (removeAclTableWithEgrDscp(table_id)) { // Remove ACL table status from STATE_DB removeAclTableStatus(table_id); @@ -4581,7 +5139,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) try { - newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t); + newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t, &m_metaDataMgr); } catch (exception &e) { @@ -4658,20 +5216,19 @@ void AclOrch::doAclRuleTask(Consumer &consumer) SWSS_LOG_ERROR("Failed to add attribute '%s : %s'", attr_name.c_str(), attr_value.c_str()); } } - if (bHasIPV4 && bHasIPV6) - { - if (type == TABLE_TYPE_L3V4V6) - { - SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); - bAllAttributesOk = false; - } - } + { + if (type == TABLE_TYPE_L3V4V6) + { + SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); + bAllAttributesOk = false; + } + } // validate and create ACL rule if (bAllAttributesOk && newRule->validate()) { - if (addAclRule(newRule, table_id)) + if (addAclRuleWithEgrSetDscp(newRule, table_id)) { setAclRuleStatus(table_id, rule_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); @@ -4692,7 +5249,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } else if (op == DEL_COMMAND) { - if (removeAclRule(table_id, rule_id)) + if (removeAclRuleWithEgrSetDscp(table_id, rule_id)) { removeAclRuleStatus(table_id, rule_id); it = 
consumer.m_toSync.erase(it); @@ -5112,3 +5669,55 @@ void AclOrch::removeAllAclRuleStatus() } } +MetaDataMgr::MetaDataMgr() +{ + for (uint8_t i = METADATA_VALUE_START; i <= METADATA_VALUE_END; i++) + { + m_freeMetadata.push_back(i); + } +} +uint8_t MetaDataMgr::getFreeMetaData(uint8_t dscp) +{ + uint8_t metadata =METADATA_VALUE_INVALID; + if (m_dscpMetadata.find(dscp) != m_dscpMetadata.end()) + { + // dscp value has a metadata value assigned to it. + metadata = m_dscpMetadata[dscp]; + } + else + { + if (m_freeMetadata.empty()) + { + SWSS_LOG_ERROR("Metadata Value not available for allocation."); + return metadata; + } + metadata = m_freeMetadata.front(); + m_freeMetadata.erase(m_freeMetadata.begin()); + m_dscpMetadata[dscp] = metadata; + } + m_MetadataRef[metadata] += 1; + return metadata; +} + +void MetaDataMgr::recycleMetaData(uint8_t metadata) +{ + m_MetadataRef[metadata] -= 1; + if (m_MetadataRef[metadata] == 0) + { + + for (auto iter = m_dscpMetadata.begin(); iter != m_dscpMetadata.end();) + { + if ( iter->second == metadata) + { + m_dscpMetadata.erase(iter++); + m_freeMetadata.push_front(metadata); + break; + } + else + { + ++iter; + } + } + } +} + diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index abeaf519e2..1c0490d862 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -51,6 +51,7 @@ #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" #define MATCH_BTH_OPCODE "BTH_OPCODE" #define MATCH_AETH_SYNDROME "AETH_SYNDROME" +#define MATCH_METADATA "META_DATA" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" @@ -68,6 +69,8 @@ #define ACTION_DTEL_FLOW_SAMPLE_PERCENT "FLOW_SAMPLE_PERCENT" #define ACTION_DTEL_REPORT_ALL_PACKETS "REPORT_ALL_PACKETS" #define ACTION_COUNTER "COUNTER" +#define ACTION_META_DATA "META_DATA_ACTION" +#define ACTION_DSCP "DSCP_ACTION" #define PACKET_ACTION_FORWARD "FORWARD" #define PACKET_ACTION_DROP "DROP" @@ -109,6 +112,13 @@ enum AclObjectStatus PENDING_REMOVAL }; +enum EgressSetDscpTableStatus +{ + EGRESS_SET_DSCP_TABLE_FAILED = 0, + EGRESS_SET_DSCP_TABLE_SUCCESS, + EGRESS_SET_DSCP_TABLE_NOT_REQUIRED, +}; + struct AclActionCapabilities { set actionList; @@ -165,6 +175,20 @@ class AclTableRangeMatch: public AclTableMatchInterface private: vector m_rangeList; }; + +class MetaDataMgr +{ +public: + MetaDataMgr(); + uint8_t getFreeMetaData(uint8_t dscp); + void recycleMetaData(uint8_t metadata); + +private: + list m_freeMetadata; + map m_dscpMetadata; + map m_MetadataRef; +}; + class AclTableType { public: @@ -278,7 +302,7 @@ class AclRule bool getCreateCounter() const; const vector& getRangeConfig() const; - static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&); + static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&, MetaDataMgr * m_metadataMgr); virtual ~AclRule() {} protected: @@ -377,6 +401,23 @@ class AclRuleDTelWatchListEntry: public AclRule bool INT_session_valid; }; +class AclRuleUnderlaySetDscp: public AclRule +{ +public: + AclRuleUnderlaySetDscp(AclOrch *m_pAclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter = true); + + bool validateAddAction(string attr_name, string attr_value); + bool validate(); + void onUpdate(SubjectType, void *) override; + uint32_t getDscpValue() const; + uint32_t getMetadata() const; +protected: + uint32_t cachedDscpValue; + uint32_t cachedMetadata; + string 
table_id; + MetaDataMgr* m_metaDataMgr; +}; + class AclTable { public: @@ -487,6 +528,17 @@ class AclOrch : public Orch, public Observer bool addAclTable(AclTable &aclTable); bool removeAclTable(string table_id); + bool addAclTable(string table_id, AclTable &aclTable, string orignalTableTypeName); + bool removeAclTableWithEgrDscp(string table_id); + bool updateAclTable(string table_id, AclTable &table, string orignalTableTypeName); + EgressSetDscpTableStatus addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName); + + bool removeEgrSetDscpTable(string table_id); + bool addEgrSetDscpRule(string key, string dscpAction); + bool removeEgrSetDscpRule(string key); + bool addAclRuleWithEgrSetDscp(shared_ptr aclRule, string table_id); + bool removeAclRuleWithEgrSetDscp(string table_id, string rule_id); + bool addAclTableType(const AclTableType& tableType); bool removeAclTableType(const string& tableTypeName); bool updateAclTable(AclTable ¤tTable, AclTable &newTable); @@ -506,6 +558,12 @@ class AclOrch : public Orch, public Observer bool isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) const; bool isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const; bool isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_acl_action_parameter_t param) const; + bool isUsingEgrSetDscp(const string& table) const; + string translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const; + + void addMetaDataRef(string key, uint8_t metadata); + void removeMetaDataRef(string key, uint8_t metadata); + uint32_t getMetaDataRefCount(uint8_t metadata); bool m_isCombinedMirrorV6Table = true; map m_mirrorTableCapabilities; @@ -586,9 +644,12 @@ class AclOrch : public Orch, public Observer Table m_aclTableStateTable; Table m_aclRuleStateTable; + MetaDataMgr m_metaDataMgr; map m_mirrorTableId; map m_mirrorV6TableId; - + set m_egrSetDscpRef; + map> m_metadataEgrDscpRule; + map m_egrDscpRuleMetadata; acl_capabilities_t m_aclCapabilities; acl_action_enum_values_capabilities_t m_aclEnumActionCapabilities; FlexCounterManager m_flex_counter_manager; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 1b1cdeb29a..7c4ff86813 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -35,7 +35,11 @@ extern "C" { #define TABLE_TYPE_MCLAG "MCLAG" #define TABLE_TYPE_MUX "MUX" #define TABLE_TYPE_DROP "DROP" - +#define TABLE_TYPE_MARK_META "MARK_META" +#define TABLE_TYPE_MARK_META_V6 "MARK_METAV6" +#define TABLE_TYPE_EGR_SET_DSCP "EGR_SET_DSCP" +#define TABLE_TYPE_UNDERLAY_SET_DSCP "UNDERLAY_SET_DSCP" +#define TABLE_TYPE_UNDERLAY_SET_DSCPV6 "UNDERLAY_SET_DSCPV6" typedef enum { ACL_STAGE_UNKNOWN, diff --git a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index 236ccaa0fc..dc338ce9f1 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -331,7 +331,32 @@ def verify_acl_table_action_list( for action in expected_action_list: assert action in action_list - + def create_dscp_acl_rule( + self, + table_name: str, + rule_name: str, + qualifiers: Dict[str, str], + action: str, + priority: str = "2020" + ) -> None: + """Create a new DSCP ACL rule in the given table. + + Args: + table_name: The name of the ACL table to add the rule to. + rule_name: The name of the ACL rule. + qualifiers: The list of qualifiers to add to the rule. + action: DSCP value. + priority: The priority of the rule. 
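+            Note: the qualifiers are written to the CONFIG_DB ACL_RULE entry as-is, and the action value is stored in its DSCP_ACTION field.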
+ """ + fvs = { + "priority": priority, + "DSCP_ACTION": action + } + + for k, v in qualifiers.items(): + fvs[k] = v + self.config_db.create_entry("ACL_RULE", "{}|{}".format(table_name, rule_name), fvs) + def create_acl_rule( self, table_name: str, diff --git a/tests/test_acl_mark.py b/tests/test_acl_mark.py new file mode 100644 index 0000000000..be09e7df8e --- /dev/null +++ b/tests/test_acl_mark.py @@ -0,0 +1,447 @@ +import pytest +from requests import request + +OVERLAY_TABLE_TYPE = "UNDERLAY_SET_DSCP" +OVERLAY_TABLE_NAME = "OVERLAY_MARK_META_TEST" +OVERLAY_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] +OVERLAY_RULE_NAME = "OVERLAY_TEST_RULE" + +OVERLAY_TABLE_TYPE6 = "UNDERLAY_SET_DSCPV6" +OVERLAY_TABLE_NAME6 = "OVERLAY_MARK_META_TEST6" +OVERLAY_BIND_PORTS6 = ["Ethernet20", "Ethernet24", "Ethernet28", "Ethernet32"] +OVERLAY_RULE_NAME6 = "OVERLAY_TEST_RULE6" + +# tests for UNDERLAY_SET_DSCP table + + +class TestAclMarkMeta: + @pytest.fixture + def overlay_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, + OVERLAY_TABLE_TYPE, + OVERLAY_BIND_PORTS) + yield dvs_acl.get_acl_table_ids(2) + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + + @pytest.fixture + def overlay6_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, + OVERLAY_TABLE_TYPE6, + OVERLAY_BIND_PORTS6) + yield dvs_acl.get_acl_table_ids(2) + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + + def verify_acl_table_group_members_multitable(self, dvs_acl, acl_table_id, acl_table_group_ids, member_count): + members = dvs_acl.asic_db.wait_for_n_keys(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, + member_count) + + member_groups = [] + table_member_map = {} + for member in members: + fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, member) + group_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID") + table_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID") + + if group_id in acl_table_group_ids and table_id in acl_table_id: + member_groups.append(group_id) + if table_id not in table_member_map: + table_member_map[table_id] = [] + table_member_map[table_id].append(group_id) + + assert set(member_groups) == set(acl_table_group_ids) + return table_member_map + + def get_table_stage(self, dvs_acl, acl_table_id, v4_ports, v6_ports): + stages = [] + names = [] + ports = [] + for table in acl_table_id: + fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_TABLE_NAME, table) + stage = fvs.get("SAI_ACL_TABLE_ATTR_ACL_STAGE") + if stage == "SAI_ACL_STAGE_INGRESS": + stages.append("ingress") + elif stage == "SAI_ACL_STAGE_EGRESS": + stages.append("egress") + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META") + if qual == "true": + names.append("EGR_SET_DSCP") + ports.append(v4_ports+v6_ports) + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6") + if qual == "true": + names.append("MARK_META6") + ports.append(v6_ports) + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IP") + if qual == "true": + names.append("MARK_META") + ports.append(v4_ports) + return stages, names, ports + + def verify_acl_table_port_binding_multi(self, dvs_acl, table_member_map, bind_ports, stages, acl_table_id): + for i in range(0, len(stages)): + stage = stages[i] + table = acl_table_id[i] + port_groups = [] + for port in bind_ports[i]: + port_oid = dvs_acl.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "").get(port) + fvs = 
dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) + acl_table_group_id = fvs.pop(dvs_acl.ADB_PORT_ATTR_LOOKUP[stage], None) + assert acl_table_group_id in table_member_map[table] + port_groups.append(acl_table_group_id) + + assert len(port_groups) == len(bind_ports[i]) + assert set(port_groups) == set(table_member_map[table]) + + + def get_acl_rules_with_action(self, dvs_acl, total_rules): + """Verify that there are N rules in the ASIC DB.""" + members = dvs_acl.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", + total_rules) + + member_groups = [] + table_member_map = {} + for member in members: + fvs = dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", member) + table_id = fvs.get("SAI_ACL_ENTRY_ATTR_TABLE_ID") + entry = {} + entry['id'] = member + action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP") + if action: + entry['action_type'] = "dscp" + entry['action_value'] = action + meta = fvs.get("SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META") + entry['match_meta'] = meta.split('&')[0] + action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA") + if action: + entry['action_type'] = "meta" + entry['action_value'] = action + + if table_id not in table_member_map: + table_member_map[table_id] = [] + table_member_map[table_id].append(entry) + return table_member_map + + def verify_acl_rules_with_action(self, table_names, acl_table_id, table_rules, meta, dscp): + for i in range(0, len(table_names)): + if acl_table_id[i] in table_rules: + for j in range(0, len(table_rules[acl_table_id[i]])): + if table_names[i] == "MARK_META" or table_names[i] == "MARK_META6": + assert table_rules[acl_table_id[i]][j]['action_type'] == "meta" + assert table_rules[acl_table_id[i]][j]['action_value'] in meta + else: + assert table_rules[acl_table_id[i]][j]['action_type'] == "dscp" + assert table_rules[acl_table_id[i]][j]['action_value'] in dscp + assert table_rules[acl_table_id[i]][j]['match_meta'] in meta + + def test_OverlayTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(2) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, OVERLAY_BIND_PORTS, []) + + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + + def test_Overlay6TableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. 
+ acl_table_id = dvs_acl.get_acl_table_ids(2) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayBothv4v6TableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(3) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*4) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 16) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(2) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + acl_table_id = dvs_acl.get_acl_table_ids(2) + + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayBothv4v6TableSameintfCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. 
+ acl_table_id = dvs_acl.get_acl_table_ids(3) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 12) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(2) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + acl_table_id = dvs_acl.get_acl_table_ids(2) + + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayEntryCreationDeletion(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32"} + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE", config_qualifiers,action="12") + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryMultiRuleRef(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + #create 2nd Rule + config_qualifiers["DSCP"] = "2" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="12") + #create 3rd Rule + config_qualifiers["DSCP"] = "3" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="12") + + #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 4) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + + # remove first rule. 
We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 3) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + + # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + + # Verify the STATE_DB entry is removed + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) + + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryMultiTableRules(self, dvs_acl): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1"} + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, + OVERLAY_TABLE_TYPE, + OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, + OVERLAY_TABLE_TYPE6, + OVERLAY_BIND_PORTS6) + acl_table_id = dvs_acl.get_acl_table_ids(3) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + + #create 2nd Rule ipv6 + config_qualifiers6 = {"SRC_IPV6": "2777::0/64", + "DST_IPV6": "2788::0/64", + "DSCP" : "1"}; + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME6, "1", config_qualifiers6, action="12") + + # Verify status of both rules. + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 3) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + + # remove first rule. We should still have 1 rule, 1 for MARK_META + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME6, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + + # remove 2nd rule. 
We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + dvs_acl.verify_no_acl_rules() + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + + def test_OverlayEntryMultiMetaRule(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + #create 2nd Rule + config_qualifiers["DSCP"] = "2" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="13") + #create 3rd Rule + config_qualifiers["DSCP"] = "3" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="14") + + #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 6) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + + # remove first rule. We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 4) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + + # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + + # Verify the STATE_DB entry is removed + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) + + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryExhaustMeta(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 8 rules. 8th one should fail. 
+ for i in range(1, 9): + config_qualifiers["DSCP"] = str(i) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) + if i < 8: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") + else: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + + table_rules = self.get_acl_rules_with_action(dvs_acl, 14) + meta = [str(i) for i in range(1, 8)] + dscps = [str(i) for i in range(11, 18)] + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) + + for i in range(1, 9): + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryTestMetaDataMgr(self, dvs_acl, overlay_acl_table): + # allocate all 7 metadata values and free them multiple times. + # At the end there should be no rules allocated. + for i in range(1, 4): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 8 rules. 8th one should fail. + for i in range(1, 9): + config_qualifiers["DSCP"] = str(i) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) + if i < 8: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") + else: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + + table_rules = self.get_acl_rules_with_action(dvs_acl, 14) + meta = [str(i) for i in range(1, 8)] + dscps = [str(i) for i in range(11, 18)] + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) + + for i in range(1, 9): + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + dvs_acl.verify_no_acl_rules() + + # Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass From c7ecd7db784b5066e96b9f086c7fbb179ae1ea87 Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Thu, 30 May 2024 12:08:26 -0700 Subject: [PATCH 03/14] Add fabric port monitoring toggle check (#3132) What I did Add fabric port monitoring toggle check. The command is added at sonic-net/sonic-utilities#2932. This is the orchagent handling part. 
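For reference, the toggle flows from CONFIG_DB (FABRIC_MONITOR|FABRIC_MONITOR_DATA) through fabricmgr into APPL_DB (FABRIC_MONITOR_TABLE), where FabricPortsOrch reads monState. The snippet below is an illustrative sketch only, not part of this patch; it assumes the dvs fixture and DB helpers already used by the vs tests modified in this change, and mirrors the pattern they follow:

    # Hypothetical vs-test excerpt: enable fabric monitoring in CONFIG_DB and wait
    # for fabricmgr to mirror the field into APPL_DB, the same sequence used in
    # tests/test_fabric_port.py in this patch.
    config_db = dvs.get_config_db()
    app_db = dvs.get_app_db()
    config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", {"monState": "enable"})
    app_db.wait_for_field_match("FABRIC_MONITOR_TABLE", "FABRIC_MONITOR_DATA", {"monState": "enable"})

Once the APPL_DB field reads "enable", FabricPortsOrch starts its debug timer; setting it back to "disable" stops the timer and makes doFabricPortTask return early.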
--- cfgmgr/fabricmgr.cpp | 7 +++ cfgmgr/fabricmgr.h | 2 +- orchagent/fabricportsorch.cpp | 66 ++++++++++++++++++++++++++++- orchagent/fabricportsorch.h | 1 + orchagent/orchdaemon.cpp | 7 ++- tests/test_fabric_capacity.py | 7 ++- tests/test_fabric_port.py | 11 ++++- tests/test_fabric_port_isolation.py | 6 ++- tests/test_fabric_rate.py | 5 ++- 9 files changed, 101 insertions(+), 11 deletions(-) diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp index 16a8111199..bb2420387c 100644 --- a/cfgmgr/fabricmgr.cpp +++ b/cfgmgr/fabricmgr.cpp @@ -41,6 +41,7 @@ void FabricMgr::doTask(Consumer &consumer) string monPollThreshRecovery, monPollThreshIsolation; string isolateStatus; string alias, lanes; + string enable; std::vector field_values; string value; @@ -66,6 +67,12 @@ void FabricMgr::doTask(Consumer &consumer) monPollThreshIsolation = fvValue(i); writeConfigToAppDb(key, "monPollThreshIsolation", monPollThreshIsolation); } + else if (fvField(i) == "monState") + { + SWSS_LOG_INFO("Enable fabric monitoring setting in appl_db."); + enable = fvValue(i); + writeConfigToAppDb(key, "monState", enable); + } else if (fvField(i) == "alias") { alias = fvValue(i); diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h index 1fd399fef9..afadd26d57 100644 --- a/cfgmgr/fabricmgr.h +++ b/cfgmgr/fabricmgr.h @@ -20,7 +20,7 @@ class FabricMgr : public Orch private: Table m_cfgFabricMonitorTable; Table m_cfgFabricPortTable; - Table m_appFabricMonitorTable; + ProducerStateTable m_appFabricMonitorTable; ProducerStateTable m_appFabricPortTable; void doTask(Consumer &consumer); diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index b9f6283fce..159f415beb 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -101,7 +101,36 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vectorstart(); + bool fabricPortMonitor = checkFabricPortMonState(); + if (fabricPortMonitor) + { + m_debugTimer->start(); + SWSS_LOG_INFO("Fabric monitor starts at init time"); + } +} + +bool FabricPortsOrch::checkFabricPortMonState() +{ + bool enabled = false; + std::vector constValues; + bool setCfgVal = m_applMonitorConstTable->get("FABRIC_MONITOR_DATA", constValues); + if (!setCfgVal) + { + return enabled; + } + SWSS_LOG_INFO("FabricPortsOrch::checkFabricPortMonState starts"); + for (auto cv : constValues) + { + if (fvField(cv) == "monState") + { + if (fvValue(cv) == "enable") + { + enabled = true; + return enabled; + } + } + } + return enabled; } int FabricPortsOrch::getFabricPortList() @@ -1188,7 +1217,12 @@ void FabricPortsOrch::doTask() void FabricPortsOrch::doFabricPortTask(Consumer &consumer) { - SWSS_LOG_NOTICE("FabricPortsOrch::doFabricPortTask"); + if (!checkFabricPortMonState()) + { + SWSS_LOG_INFO("doFabricPortTask returns early due to feature disabled"); + return; + } + SWSS_LOG_INFO("FabricPortsOrch::doFabricPortTask starts"); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -1350,11 +1384,38 @@ void FabricPortsOrch::doTask(Consumer &consumer) SWSS_LOG_NOTICE("doTask from FabricPortsOrch"); string table_name = consumer.getTableName(); + SWSS_LOG_INFO("Table name: %s", table_name.c_str()); if (table_name == APP_FABRIC_MONITOR_PORT_TABLE_NAME) { doFabricPortTask(consumer); } + if (table_name == APP_FABRIC_MONITOR_DATA_TABLE_NAME) + { + SWSS_LOG_INFO("doTask for APP_FABRIC_MONITOR_DATA_TABLE_NAME"); + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + for (auto i : 
kfvFieldsValues(t)) + { + if (fvField(i) == "monState") + { + if (fvValue(i) == "enable") + { + m_debugTimer->start(); + SWSS_LOG_INFO("debugTimer started"); + } + else + { + m_debugTimer->stop(); + SWSS_LOG_INFO("debugTimer stopped"); + } + } + } + it = consumer.m_toSync.erase(it); + } + } } void FabricPortsOrch::doTask(swss::SelectableTimer &timer) @@ -1384,6 +1445,7 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) if (m_getFabricPortListDone) { + SWSS_LOG_INFO("Fabric monitor enabled"); updateFabricDebugCounters(); updateFabricCapacity(); updateFabricRate(); diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index 3a7cb52f04..d94ece698e 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -65,6 +65,7 @@ class FabricPortsOrch : public Orch, public Subject void updateFabricPortState(); void updateFabricDebugCounters(); void updateFabricCapacity(); + bool checkFabricPortMonState(); void updateFabricRate(); void doTask() override; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 3fc44bf81a..d5bda136fb 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -20,6 +20,7 @@ using namespace swss; #define PFC_WD_POLL_MSECS 100 #define APP_FABRIC_MONITOR_PORT_TABLE_NAME "FABRIC_PORT_TABLE" +#define APP_FABRIC_MONITOR_DATA_TABLE_NAME "FABRIC_MONITOR_TABLE" /* orchagent heart beat message interval */ #define HEART_BEAT_INTERVAL_MSECS 10 * 1000 @@ -527,7 +528,8 @@ bool OrchDaemon::init() // register APP_FABRIC_MONITOR_PORT_TABLE_NAME table const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri } + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables, m_fabricPortStatEnabled, m_fabricQueueStatEnabled); m_orchList.push_back(gFabricPortsOrch); @@ -1088,7 +1090,8 @@ bool FabricOrchDaemon::init() const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri } + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables); addOrchList(gFabricPortsOrch); diff --git a/tests/test_fabric_capacity.py b/tests/test_fabric_capacity.py index 91bb1b5e94..a796e9f6bf 100644 --- a/tests/test_fabric_capacity.py +++ b/tests/test_fabric_capacity.py @@ -22,6 +22,11 @@ def test_voq_switch_fabric_capacity(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + # get state_db infor sdb = dvs.get_state_db() # There are 16 fabric ports in the test environment. 
@@ -30,8 +35,6 @@ def test_voq_switch_fabric_capacity(self, vst): cdb_port = "Fabric"+str(portNum) sdb_port = "PORT"+str(portNum) - max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) - # setup test environment sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) diff --git a/tests/test_fabric_port.py b/tests/test_fabric_port.py index a7ad9958b0..dbdd235605 100644 --- a/tests/test_fabric_port.py +++ b/tests/test_fabric_port.py @@ -21,15 +21,22 @@ def test_voq_switch_fabric_link(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": - # get config_db information + # get app_db/config_db information cdb = dvs.get_config_db() + adb = dvs.get_app_db() + + # check if the fabric montior toggle working + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}) + + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}) # set config_db to isolateStatus: True cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) # check if appl_db value changes to isolateStatus: True - adb = dvs.get_app_db() adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "True"}) # cleanup diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py index d1b57a019f..48e3281ae9 100644 --- a/tests/test_fabric_port_isolation.py +++ b/tests/test_fabric_port_isolation.py @@ -21,6 +21,11 @@ def test_voq_switch_fabric_link(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + # get state_db infor sdb = dvs.get_state_db() # key @@ -30,7 +35,6 @@ def test_voq_switch_fabric_link(self, vst): port = "PORT"+str(portNum) # wait for link monitoring algorithm skips init pollings sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "TEST"}) - max_poll = PollingConfig(polling_interval=60, timeout=1200, strict=True) if sdb.get_entry("FABRIC_PORT_TABLE", port)['STATUS'] == 'up': try: # clean up the system for the testing port. 
diff --git a/tests/test_fabric_rate.py b/tests/test_fabric_rate.py index 59e5303de3..1885aca2a9 100644 --- a/tests/test_fabric_rate.py +++ b/tests/test_fabric_rate.py @@ -22,6 +22,10 @@ def test_voq_switch_fabric_rate(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) # get state_db infor sdb = dvs.get_state_db() @@ -31,7 +35,6 @@ def test_voq_switch_fabric_rate(self, vst): portNum = random.randint(1, 16) sdb_port = "PORT"+str(portNum) - max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) tx_rate = sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['OLD_TX_DATA'] sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) sdb.wait_for_field_negative_match("FABRIC_PORT_TABLE", sdb_port, {'OLD_TX_DATA': tx_rate}, polling_config=max_poll) From 7c7cece4af7d0c9714bea23ee94a2e6d2087617d Mon Sep 17 00:00:00 2001 From: Arvindsrinivasan Lakshmi Narasimhan <55814491+arlakshm@users.noreply.github.com> Date: Thu, 30 May 2024 14:28:23 -0700 Subject: [PATCH 04/14] [Chassis][voq] remote link down ECMP acceleration (#3150) What I did Handle update of nexthops from nexthop group when the port on the remote linecards goes up or down MSFT ADO 28218126 Why I did it Handing of remote port state change faster before the protocol converges. --- orchagent/intfsorch.cpp | 52 ++++++- orchagent/intfsorch.h | 1 + orchagent/neighorch.cpp | 31 ++++ orchagent/neighorch.h | 2 + orchagent/portsorch.cpp | 11 ++ tests/test_virtual_chassis.py | 151 +++++++++++++++++++- tests/virtual_chassis/1/default_config.json | 7 +- 7 files changed, 250 insertions(+), 5 deletions(-) diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index a02c628fcf..dc4e797bea 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -715,7 +715,7 @@ void IntfsOrch::doTask(Consumer &consumer) bool mpls = false; string vlan = ""; string loopbackAction = ""; - + string oper_status =""; for (auto idx : data) { const auto &field = fvField(idx); @@ -807,6 +807,10 @@ void IntfsOrch::doTask(Consumer &consumer) { loopbackAction = value; } + else if (field == "oper_status") + { + oper_status = value; + } } if (alias == "eth0" || alias == "docker0") @@ -860,7 +864,19 @@ void IntfsOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); continue; } - + if(table_name == CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME) + { + if(isRemoteSystemPortIntf(alias)) + { + SWSS_LOG_INFO("Handle remote systemport intf %s, oper status %s", alias.c_str(), oper_status.c_str()); + bool isUp = (oper_status == "up") ? true : false; + if (!gNeighOrch->ifChangeInformRemoteNextHop(alias, isUp)) + { + SWSS_LOG_WARN("Unable to update the nexthop for port %s, oper status %s", alias.c_str(), oper_status.c_str()); + } + + } + } //Voq Inband interface config processing if(inband_type.size() && !ip_prefix_in_key) { @@ -1656,7 +1672,10 @@ void IntfsOrch::voqSyncAddIntf(string &alias) return; } - FieldValueTuple nullFv ("NULL", "NULL"); + + string oper_status = port.m_oper_status == SAI_PORT_OPER_STATUS_UP ? 
"up" : "down"; + + FieldValueTuple nullFv ("oper_status", oper_status); vector attrs; attrs.push_back(nullFv); @@ -1696,3 +1715,30 @@ void IntfsOrch::voqSyncDelIntf(string &alias) m_tableVoqSystemInterfaceTable->del(alias); } +void IntfsOrch::voqSyncIntfState(string &alias, bool isUp) +{ + Port port; + string port_alias; + if(gPortsOrch->getPort(alias, port)) + { + if (port.m_type == Port::LAG) + { + if (port.m_system_lag_info.switch_id != gVoqMySwitchId) + { + return; + } + port_alias = port.m_system_lag_info.alias; + } + else + { + if(port.m_system_port_info.type == SAI_SYSTEM_PORT_TYPE_REMOTE) + { + return; + } + port_alias = port.m_system_port_info.alias; + } + SWSS_LOG_NOTICE("Syncing system interface state %s for port %s", isUp ? "up" : "down", port_alias.c_str()); + m_tableVoqSystemInterfaceTable->hset(port_alias, "oper_status", isUp ? "up" : "down"); + } + +} \ No newline at end of file diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index 71d89be725..aa5129bef4 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -72,6 +72,7 @@ class IntfsOrch : public Orch bool isRemoteSystemPortIntf(string alias); bool isLocalSystemPortIntf(string alias); + void voqSyncIntfState(string &alias, bool); private: diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 6e752ba09c..006f456a1c 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -340,6 +340,8 @@ bool NeighOrch::setNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_flag auto nhop = m_syncdNextHops.find(nexthop); bool rc = false; + SWSS_LOG_INFO("setNextHopFlag on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); assert(nhop != m_syncdNextHops.end()); if (nhop->second.nh_flags & nh_flag) @@ -379,6 +381,8 @@ bool NeighOrch::clearNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_fl nhop->second.nh_flags &= ~nh_flag; uint32_t count; + SWSS_LOG_INFO("clearnexthop on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); switch (nh_flag) { case NHFLAGS_IFDOWN: @@ -1901,3 +1905,30 @@ bool NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddre return false; } + +bool NeighOrch::ifChangeInformRemoteNextHop(const string &alias, bool if_up) +{ + SWSS_LOG_ENTER(); + bool rc = true; + Port inbp; + gPortsOrch->getInbandPort(inbp); + for (auto nbr = m_syncdNeighbors.begin(); nbr != m_syncdNeighbors.end(); ++nbr) + { + if (nbr->first.alias != alias) + { + continue; + } + SWSS_LOG_INFO("Found remote Neighbor %s on %s", nbr->first.ip_address.to_string().c_str(), alias.c_str()); + NextHopKey nhop = { nbr->first.ip_address, inbp.m_alias }; + + if (if_up) + { + rc = clearNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + else + { + rc = setNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + } + return rc; +} \ No newline at end of file diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index e72979ad07..f44741fa37 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -72,6 +72,7 @@ class NeighOrch : public Orch, public Subject, public Observer bool removeTunnelNextHop(const NextHopKey&); bool ifChangeInformNextHop(const string &, bool); + bool isNextHopFlagSet(const NextHopKey &, const uint32_t); bool removeOverlayNextHop(const NextHopKey &); void update(SubjectType, void *); @@ -81,6 +82,7 @@ class NeighOrch : public Orch, public Subject, public Observer void resolveNeighbor(const NeighborEntry &); void updateSrv6Nexthop(const NextHopKey &, const sai_object_id_t &); + bool 
ifChangeInformRemoteNextHop(const string &, bool); private: PortsOrch *m_portsOrch; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index bac498fa3d..f370d09607 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -7986,6 +7986,8 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) isUp ? "up" : "down"); } } + SWSS_LOG_INFO("Updating the nexthop for port %s and operational status %s", port.m_alias.c_str(), isUp ? "up" : "down"); + if (!gNeighOrch->ifChangeInformNextHop(port.m_alias, isUp)) { SWSS_LOG_WARN("Inform nexthop operation failed for interface %s", port.m_alias.c_str()); @@ -7998,6 +8000,15 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) } } + if(gMySwitchType == "voq") + { + if (gIntfsOrch->isLocalSystemPortIntf(port.m_alias)) + { + gIntfsOrch->voqSyncIntfState(port.m_alias, isUp); + } + } + + PortOperStateUpdate update = {port, status}; notify(SUBJECT_TYPE_PORT_OPER_STATE_CHANGE, static_cast(&update)); } diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index 5401f6870f..cd1e66a2b8 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -61,7 +61,70 @@ def del_inbandif_port(self, vct, ibport): # Applicable only for line cards if cfg_switch_type == "voq": config_db.delete_entry("VOQ_INBAND_INTERFACE", f"{ibport}") - + + def get_lc_dvs(self, vct, lc_switch_id): + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + switch_id = metatbl.get("switch_id") + assert switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" + if lc_switch_id == switch_id: + return dvs + + def get_sup_dvs(self, vct): + dvss = vct.dvss + for name in dvss.keys(): + if name.startswith("supervisor"): + return dvss[name] + + def configure_neighbor(self, dvs, action, test_neigh_ip, mac_address, test_neigh_dev): + _, res = dvs.runcmd(['sh', "-c", "ip neigh show"]) + if action == "add": + _, res = dvs.runcmd(['sh', "-c", f"ip neigh {action} {test_neigh_ip} lladdr {mac_address} dev {test_neigh_dev}"]) + assert res == "", "Error configuring static neigh" + else: + _, res = dvs.runcmd(['sh', "-c", f"ip neigh del {test_neigh_ip} dev {test_neigh_dev}"]) + assert res == "", "Error deleting static neigh" + + def get_num_of_ecmp_paths_from_asic_db(self, dvs, ip_prefix): + # get the route entry + routes = dvs.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + + + # find the entry for the interested prefix + route_key = "" + for route in routes: + if ip_prefix in route: + route_key = route + break + + assert route_key != "", "Route not found" + + # get the nexthop group oid + route_entry =dvs.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", route_key) + nhg_id = route_entry.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", None) + + assert nhg_id is not None, "nexthop group is not found" + + # find the nexthop in the nexthop group member table which belong the nhg_id + nhs = dvs.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + count = 0 + for nh in nhs: + nh_entry = dvs.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nh) + nh_nhg_id = nh_entry.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID", None) + + if nh_nhg_id == nhg_id: + count+=1 + + return count + def test_connectivity(self, vct): if vct is None: 
return @@ -972,7 +1035,93 @@ def test_chassis_wred_profile_on_system_ports(self, vct): # Total number of logs = (No of system ports * No of lossless priorities) - No of lossless priorities for CPU ports assert logSeen.strip() == str(len(system_ports)*2 - 2) + + def test_chassis_system_intf_status(self, vct): + dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + for key in keys: + intf = chassis_app_db.get_entry("SYSTEM_INTERFACE", key) + # Get the oper_status + oper_status = intf.get("oper_status", "unknown") + assert oper_status != "unknown", "System interface oper status is unknown" + + def test_remote_port_down(self, vct): + # test params + local_lc_switch_id = '0' + remote_lc_switch_id = '2' + test_system_port = "lc1|Asic0|Ethernet4" + test_prefix = "13.13.0.0/16" + inband_port = "Ethernet0" + test_neigh_ip_1 = "10.8.104.10" + test_neigh_dev_1 = "Ethernet4" + test_neigh_mac_1 = "00:01:02:03:04:05" + test_neigh_ip_2 = "10.8.108.10" + test_neigh_dev_2 = "Ethernet8" + test_neigh_mac_2 = "00:01:02:03:04:06" + + local_lc_dvs = self.get_lc_dvs(vct, local_lc_switch_id) + remote_lc_dvs = self.get_lc_dvs(vct, remote_lc_switch_id) + # config inband port + self.config_inbandif_port(vct, inband_port) + + # add 2 neighbors + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_1) + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_2, test_neigh_mac_2, test_neigh_dev_2) + + time.sleep(30) + + # add route of LC1(pretend learnt via bgp) + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route add {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + time.sleep(10) + # verify 2 nexthops are programmed in asic_db + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 2, "ECMP paths not configured" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "down") + time.sleep(10) + + # verify the port oper status is down in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "down", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 1, "Remote port down does not remote ecmp member" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "up") + time.sleep(10) + + # verify the port oper status is up in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "up", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = 
self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs,test_prefix) + assert paths == 2, "Remote port up is not added in nexthop group" + + #cleanup + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route del {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + # Cleanup inband if configuration + self.del_inbandif_port(vct, inband_port) + + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/virtual_chassis/1/default_config.json b/tests/virtual_chassis/1/default_config.json index 88769c9ce6..8cea66ee12 100644 --- a/tests/virtual_chassis/1/default_config.json +++ b/tests/virtual_chassis/1/default_config.json @@ -15,8 +15,10 @@ "INTERFACE": { "Ethernet0": {}, "Ethernet4": {}, + "Ethernet8": {}, "Ethernet0|10.8.101.1/24": {}, - "Ethernet4|10.8.104.1/24": {} + "Ethernet4|10.8.104.1/24": {}, + "Ethernet8|10.8.108.1/24": {} }, "PORT": { "Ethernet0": { @@ -24,6 +26,9 @@ }, "Ethernet4": { "admin_status": "up" + }, + "Ethernet8": { + "admin_status": "up" } }, "SYSTEM_PORT": { From f7376636b4ac31fa84721c5ae870080e517c8841 Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Thu, 30 May 2024 15:09:36 -0700 Subject: [PATCH 05/14] Add sai call to isolate/unisolate a fabric port (#3141) * Add sai call to isolate/unisolate a fabric port --- orchagent/fabricportsorch.cpp | 60 +++++++++++++++++++++++++++-- tests/test_fabric_port_isolation.py | 12 ++++++ 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 159f415beb..80a938e38e 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -545,6 +545,7 @@ void FabricPortsOrch::updateFabricDebugCounters() int autoIsolated = 0; int cfgIsolated = 0; int isolated = 0; + int origIsolated = 0; string lnkStatus = "down"; string testState = "product"; @@ -643,6 +644,12 @@ void FabricPortsOrch::updateFabricDebugCounters() if (fvField(val) == "AUTO_ISOLATED") { autoIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently autoisolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "ISOLATED") + { + origIsolated = to_uint(valuePt); SWSS_LOG_INFO("port %s currently isolated: %s", key.c_str(),valuePt.c_str()); continue; } @@ -816,6 +823,36 @@ void FabricPortsOrch::updateFabricDebugCounters() } // if "ISOLATED" is true, Call SAI api here to actually isolated the link // if "ISOLATED" is false, Call SAP api to actually unisolate the link + + if (origIsolated != isolated) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_FABRIC_ISOLATE; + bool setVal = false; + if (isolated == 1) + { + setVal = true; + } + attr.value.booldata = setVal; + SWSS_LOG_NOTICE("Set fabric port %d with isolate %d ", lane, isolated); + if (m_fabricLanePortMap.find(lane) == m_fabricLanePortMap.end()) + { + SWSS_LOG_NOTICE("NOT find fabric lane %d ", lane); + } + else + { + sai_status_t status = sai_port_api->set_port_attribute(m_fabricLanePortMap[lane], &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set admin status"); + } + SWSS_LOG_NOTICE("Set fabric port %d state done %d ", lane, isolated); + } + } + else + { + SWSS_LOG_INFO( "Same isolation status for %d", lane); + } } else { @@ -1309,9 +1346,6 @@ void FabricPortsOrch::doFabricPortTask(Consumer &consumer) } SWSS_LOG_NOTICE("key %s alias %s 
isolateStatus %s lanes %s", key.c_str(), alias.c_str(), isolateStatus.c_str(), lanes.c_str()); - // Call SAI api to isolate/unisolate the link here. - // Isolate the link if isolateStatus is True. - // Unisolate the link if isolateStatus is False. if (isolateStatus == "False") { @@ -1372,6 +1406,26 @@ void FabricPortsOrch::doFabricPortTask(Consumer &consumer) // AUTO_ISOLATED 0 m_stateTable->hset(state_key, "AUTO_ISOLATED", m_defaultAutoIsolated.c_str()); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_FABRIC_ISOLATE; + bool setVal = false; + attr.value.booldata = setVal; + SWSS_LOG_NOTICE("Set port %s to unisolate %s ", alias.c_str(), isolateStatus.c_str()); + int idx = stoi(lanes); + if (m_fabricLanePortMap.find(idx) == m_fabricLanePortMap.end()) + { + SWSS_LOG_NOTICE("NOT find %s alias. ", alias.c_str()); + } + else + { + sai_status_t status = sai_port_api->set_port_attribute(m_fabricLanePortMap[idx], &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set admin status"); + } + SWSS_LOG_NOTICE( "Set Port %s unisolation state done", alias.c_str()); + } } } } diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py index 48e3281ae9..9743a4b702 100644 --- a/tests/test_fabric_port_isolation.py +++ b/tests/test_fabric_port_isolation.py @@ -50,6 +50,18 @@ def test_voq_switch_fabric_link(self, vst): # clear the testing errors and wait for link get unisolated. sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + # inject testing errors and wait for link get isolated again. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + # now test force unisolate this link + configKey = "Fabric"+str(portNum) + curForceStatus = int( config_db.get_entry( "FABRIC_PORT", configKey)['forceUnisolateStatus'] ) + curForceStatus += 1 + config_db.update_entry("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}) + config_db.wait_for_field_match("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}, + polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) finally: # cleanup sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) From 4ae8bfa3c61b2ff3582cb1a762f2abb0be7728f6 Mon Sep 17 00:00:00 2001 From: Song Yuan <64041228+ysmanman@users.noreply.github.com> Date: Thu, 30 May 2024 15:10:26 -0700 Subject: [PATCH 06/14] Read switch_id of fabric switch from config_db (#3102) * Read switch_id of fabric switch from config_db. * Validate fabric switch_id before create fabric switch. * Add fabric switch_id for virtual chassis * Add unit test for handling fabric switch_id. --- orchagent/main.cpp | 22 ++++++++++ tests/dvslib/dvs_database.py | 14 +++++- tests/test_fabric_switch_id.py | 48 +++++++++++++++++++++ tests/virtual_chassis/8/default_config.json | 1 + 4 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 tests/test_fabric_switch_id.py diff --git a/orchagent/main.cpp b/orchagent/main.cpp index ad03648a7d..0a804eb38c 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -588,6 +588,28 @@ int main(int argc, char **argv) attr.value.u32 = SAI_SWITCH_TYPE_FABRIC; attrs.push_back(attr); + //Read switch_id from config_db. 
+ Table cfgDeviceMetaDataTable(&config_db, CFG_DEVICE_METADATA_TABLE_NAME); + string value; + if (cfgDeviceMetaDataTable.hget("localhost", "switch_id", value)) + { + if (value.size()) + { + gVoqMySwitchId = stoi(value); + } + + if (gVoqMySwitchId < 0) + { + SWSS_LOG_ERROR("Invalid fabric switch id %d configured", gVoqMySwitchId); + exit(EXIT_FAILURE); + } + } + else + { + SWSS_LOG_ERROR("Fabric switch id is not configured"); + exit(EXIT_FAILURE); + } + attr.id = SAI_SWITCH_ATTR_SWITCH_ID; attr.value.u32 = gVoqMySwitchId; attrs.push_back(attr); diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index 553c0d7710..6724698289 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -109,7 +109,19 @@ def delete_field(self, table_name: str, key: str, field: str) -> None: """ table = swsscommon.Table(self.db_connection, table_name) table.hdel(key, field) - + + def set_field(self, table_name: str, key: str, field: str, value: str) -> None: + """Add/Update a field in an entry stored at `key` in the specified table. + + Args: + table_name: The name of the table where the entry is being removed. + key: The key that maps to the entry being added/updated. + field: The field that needs to be added/updated. + value: The value that is set for the field. + """ + table = swsscommon.Table(self.db_connection, table_name) + table.hset(key, field, value) + def get_keys(self, table_name: str) -> List[str]: """Get all of the keys stored in the specified table. diff --git a/tests/test_fabric_switch_id.py b/tests/test_fabric_switch_id.py new file mode 100644 index 0000000000..f6f76011d5 --- /dev/null +++ b/tests/test_fabric_switch_id.py @@ -0,0 +1,48 @@ +from dvslib.dvs_common import wait_for_result, PollingConfig +import pytest + +class TestFabricSwitchId(object): + def check_syslog(self, dvs, marker, log): + def do_check_syslog(): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" %(marker, log)]) + return (int(out.strip()) >= 1, None) + max_poll = PollingConfig(polling_interval=5, timeout=600, strict=True) + wait_for_result(do_check_syslog, polling_config=max_poll) + + def test_invalid_fabric_switch_id(self, vst): + # Find supervisor dvs. + dvs = None + config_db = None + for name in vst.dvss.keys(): + dvs = vst.dvss[name] + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + break + assert dvs and config_db + + # Verify orchagent's handling of invalid fabric switch_id in following cases: + # - Invalid fabric switch_id, e.g, -1, is set. + # - fabric switch_id is missing in ConfigDb. + for invalid_switch_id in (-1, None): + print(f"Test invalid switch id {invalid_switch_id}") + if invalid_switch_id is None: + config_db.delete_field("DEVICE_METADATA", "localhost", "switch_id") + expected_log = "Fabric switch id is not configured" + else: + config_db.set_field("DEVICE_METADATA", "localhost", "switch_id", str(invalid_switch_id)) + expected_log = f"Invalid fabric switch id {invalid_switch_id} configured" + + # Restart orchagent and verify orchagent behavior by checking syslog. 
+ dvs.stop_swss() + marker = dvs.add_log_marker() + dvs.start_swss() + self.check_syslog(dvs, marker, expected_log) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json index 6f77a1ade2..4160d7dd92 100644 --- a/tests/virtual_chassis/8/default_config.json +++ b/tests/virtual_chassis/8/default_config.json @@ -5,6 +5,7 @@ "chassis_db_address" : "10.8.1.200", "inband_address" : "10.8.1.200/24", "switch_type": "fabric", + "switch_id": "0", "sub_role" : "BackEnd", "start_chassis_db" : "1", "comment" : "default_config for a vs that runs chassis_db" From 8e8fc66c4b658e231c387628da9979c0450aa11c Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Fri, 31 May 2024 14:28:58 +0800 Subject: [PATCH 07/14] [ci] Migrate from sonicbld to sonicbld-1es to fix S360 alert. (#3171) What I did Agent pool migration to fix S360 alert. Why I did it --- .azure-pipelines/build-template.yml | 2 +- .azure-pipelines/gcov.yml | 2 +- .azure-pipelines/test-docker-sonic-vs-template.yml | 3 ++- azure-pipelines.yml | 6 +++--- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index 9c7e84b208..0a680e35de 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -10,7 +10,7 @@ parameters: - name: pool type: string values: - - sonicbld + - sonicbld-1es - sonicbld-armhf - sonicbld-arm64 - default diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 9bce6feccd..9b13a85502 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -8,7 +8,7 @@ parameters: - name: pool type: string values: - - sonicbld + - sonicbld-1es - default default: default diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 9eca60f0b6..db66b03472 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -91,7 +91,8 @@ jobs: sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod sudo apt-get update sudo apt-get install -y dotnet-sdk-7.0 - sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin + sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log || grep 'already installed' log.log + rm log.log displayName: "Install .NET CORE" - script: | diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e3255ba15b..f345319c03 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -41,7 +41,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bullseye common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common @@ -56,7 +56,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bullseye common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common @@ -99,7 +99,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bookworm common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common-bookworm From 
1f12a8db7bb30bf7367c121e5daf1b6cdcdc6957 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Sat, 1 Jun 2024 01:05:17 +0800 Subject: [PATCH 08/14] [subnet decap] Add subnet decap rule based on overlay ECMP (#3153) * [subnet decap] Add subnet decap rule based on overlay ECMP vxlan tunnel route What I did Support dynamic decap rule generation based on the Vxlan tunnel route of Overlay ECMP. This depends on: #3117 Why I did it To enable SONiC with the capability to decap IPinIP packets with dest IP in the Overlay ECMP Vxlan tunnel route prefix. --- orchagent/vnetorch.cpp | 50 ++++++- orchagent/vnetorch.h | 4 + tests/test_vnet.py | 324 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 376 insertions(+), 2 deletions(-) diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index b976c728a7..5c482d726d 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -21,6 +21,7 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include "tunneldecaporch.h" #include "flowcounterrouteorch.h" extern sai_virtual_router_api_t* sai_virtual_router_api; @@ -43,6 +44,7 @@ extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; extern SwitchOrch *gSwitchOrch; +extern TunnelDecapOrch *gTunneldecapOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -334,7 +336,7 @@ VNetVrfObject::~VNetVrfObject() set vr_ent = getVRids(); for (auto it : vr_ent) { - if (it != gVirtualRouterId) + if (it != gVirtualRouterId) { sai_status_t status = sai_virtual_router_api->remove_virtual_router(it); if (status != SAI_STATUS_SUCCESS) @@ -717,7 +719,8 @@ static bool update_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_obj } VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *vnetOrch) - : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME) + : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME), + app_tunnel_decap_term_producer_(db, APP_TUNNEL_DECAP_TERM_TABLE_NAME) { SWSS_LOG_ENTER(); @@ -1432,6 +1435,39 @@ bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, return true; } +inline void VNetRouteOrch::createSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + if (!config.enable || subnet_decap_terms_created_.find(ipPrefix) != subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Add subnet decap term for %s", ipPrefix.to_string().c_str()); + static const vector data = { + {"term_type", "MP2MP"}, + {"subnet_type", "vip"} + }; + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.set(key, data); + subnet_decap_terms_created_.insert(ipPrefix); +} + +inline void VNetRouteOrch::removeSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + auto it = subnet_decap_terms_created_.find(ipPrefix); + if (it == subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Remove subnet decap term for %s", ipPrefix.to_string().c_str()); + string tunnel_name = ipPrefix.isV4() ? 
config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.del(key); + subnet_decap_terms_created_.erase(it); +} + template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op) @@ -2088,6 +2124,14 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH removeRouteAdvertisement(prefix_to_use); } } + if (route_state == "active") + { + createSubnetDecapTerm(prefix_to_use); + } + else if (route_state == "inactive") + { + removeSubnetDecapTerm(prefix_to_use); + } } void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) @@ -2101,11 +2145,13 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) if(adv_prefix_refcount_[adv_pfx] == 1) { removeRouteAdvertisement(adv_pfx); + removeSubnetDecapTerm(adv_pfx); } } else { removeRouteAdvertisement(ipPrefix); + removeSubnetDecapTerm(ipPrefix); } } diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 0cffa115fd..e2ba25d0a5 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -463,6 +463,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void updateVnetTunnel(const BfdUpdate&); void updateVnetTunnelCustomMonitor(const MonitorUpdate& update); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + void createSubnetDecapTerm(const IpPrefix &ipPrefix); + void removeSubnetDecapTerm(const IpPrefix &ipPrefix); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, @@ -485,7 +487,9 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map nexthop_info_; std::map prefix_to_adv_prefix_; std::map adv_prefix_refcount_; + std::set subnet_decap_terms_created_; ProducerStateTable bfd_session_producer_; + ProducerStateTable app_tunnel_decap_term_producer_; unique_ptr monitor_session_producer_; shared_ptr state_db_; shared_ptr app_db_; diff --git a/tests/test_vnet.py b/tests/test_vnet.py index c28d7cf320..be08a52c69 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -1,4 +1,5 @@ import time +import ipaddress import json import random import time @@ -541,6 +542,62 @@ def check_syslog(dvs, marker, err_log): assert num.strip() == "0" +def create_fvs(**kwargs): + return swsscommon.FieldValuePairs(list(kwargs.items())) + + +def create_subnet_decap_tunnel(dvs, tunnel_name, **kwargs): + """Create tunnel and verify all needed entries in state DB exists.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + fvs = create_fvs(**kwargs) + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps.set(tunnel_name, fvs) + + # wait till config will be applied + time.sleep(1) + + # validate the tunnel entry in state db + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + tunnels = tunnel_state_table.getKeys() + for tunnel in tunnels: + status, fvs = tunnel_state_table.get(tunnel) + assert status == True + + for field, value in fvs: + if field == "tunnel_type": + assert value == "IPINIP" + elif field == "dscp_mode": + assert value == kwargs["dscp_mode"] + elif field == "ecn_mode": + assert value == kwargs["ecn_mode"] + elif field == "ttl_mode": + assert value == kwargs["ttl_mode"] + elif field == "encap_ecn_mode": + assert value == kwargs["encap_ecn_mode"] + else: 
+ assert False, "Field %s is not tested" % field + + +def delete_subnet_decap_tunnel(dvs, tunnel_name): + """Delete tunnel and checks that state DB is cleared.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tunnel_app_table = swsscommon.Table(appdb, "TUNNEL_DECAP_TABLE") + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps._del(tunnel_name) + + # wait till config will be applied + time.sleep(1) + + assert len(tunnel_app_table.getKeys()) == 0 + assert len(tunnel_state_table.getKeys()) == 0 + + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -577,11 +634,27 @@ class VnetVxlanVrfTunnel(object): ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" APP_VNET_MONITOR = "VNET_MONITOR_TABLE" + ecn_modes_map = { + "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", + "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" + } + + dscp_modes_map = { + "pipe" : "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL" + } + + ttl_modes_map = { + "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" + } + def __init__(self): self.tunnel_map_ids = set() self.tunnel_map_entry_ids = set() self.tunnel_ids = set() self.tunnel_term_ids = set() + self.ipinip_tunnel_term_ids = {} self.tunnel_map_map = {} self.tunnel = {} self.vnet_vr_ids = set() @@ -611,6 +684,61 @@ def fetch_exist_entries(self, dvs): if switch_mac is None: switch_mac = get_switch_mac(dvs) + def check_ipinip_tunnel(self, dvs, tunnel_name, dscp_mode, ecn_mode, ttl_mode): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) + tunnel_attrs = { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 'SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE': self.dscp_modes_map[dscp_mode], + 'SAI_TUNNEL_ATTR_ENCAP_ECN_MODE': self.ecn_modes_map[ecn_mode], + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': self.ttl_modes_map[ttl_mode] + } + check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, tunnel_attrs) + + self.tunnel_ids.add(tunnel_id) + self.tunnel[tunnel_name] = tunnel_id + + def check_del_ipinip_tunnel(self, dvs, tunnel_name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id) + self.tunnel_ids.remove(tunnel_id) + assert tunnel_id == self.tunnel[tunnel_name] + self.tunnel.pop(tunnel_name) + + def check_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + tunnel_term_attrs = { + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': str(dst_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK': str(dst_ip.netmask), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP': str(src_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK': str(src_ip.netmask), + 
'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': self.tunnel[tunnel_name] + } + check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, tunnel_term_attrs) + + self.tunnel_term_ids.add(tunnel_term_id) + self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] = tunnel_term_id + + def check_del_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id) + self.tunnel_term_ids.remove(tunnel_term_id) + assert self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] == tunnel_term_id + self.ipinip_tunnel_term_ids.pop((tunnel_name, src_ip, dst_ip)) + def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) global loopback_id, def_vr_id @@ -1099,6 +1227,30 @@ def check_custom_monitor_deleted(self, dvs, prefix, endpoint): class TestVnetOrch(object): + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" + + @pytest.fixture + def setup_subnet_decap(self, dvs): + + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + fvs = create_fvs(**subnet_decap_config) + subnet_decap_tbl.set("AZURE", fvs) + + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + _cleanup_subnet_decap_config() + + yield _apply_subnet_decap_config + + _cleanup_subnet_decap_config() + def get_vnet_obj(self): return VnetVxlanVrfTunnel() @@ -3524,6 +3676,178 @@ def test_vnet_orch_25(self, dvs, testlog): vnet_obj.check_del_vnet_entry(dvs, 'Vnet25') delete_vxlan_tunnel(dvs, tunnel_name) + ''' + Test 26 - Test for vnet tunnel routes with ECMP nexthop group with subnet decap enable + ''' + def test_vnet_orch_26(self, dvs, setup_subnet_decap): + # apply subnet decap config + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) + + # Add the subnet decap tunnel + create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe") + vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET", "uniform", "standard", "pipe") + + vnet_obj.fetch_exist_entries(dvs) + tunnel_name = 'tunnel_26' + create_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26') + create_vnet_entry(dvs, 'Vnet26', tunnel_name, '10026', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, 'Vnet26') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet26', '10026') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26', '26.0.0.1,26.0.0.2,26.0.0.3', ep_monitor='26.1.0.1,26.1.0.2,26.1.0.3', profile="test_profile") + + with pytest.raises(AssertionError): + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", 
"10.10.10.0/24") + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '26.1.0.1', 'Up') + + time.sleep(2) + # subnet decap term should be created as one bfd session state go up + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24") + + update_bfd_session_state(dvs, '26.1.0.2', 'Up') + update_bfd_session_state(dvs, '26.1.0.3', 'Up') + time.sleep(2) + vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet26', ['26.0.0.1', '26.0.0.2', '26.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", ['26.0.0.1', '26.0.0.2', '26.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '26.1.0.1', 'Down') + update_bfd_session_state(dvs, '26.1.0.2', 'Down') + update_bfd_session_state(dvs, '26.1.0.3', 'Down') + time.sleep(2) + + # subnet decap term should be removed as all bfd session states go down + vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24") + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove tunnel route + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3']) + + delete_vnet_entry(dvs, 'Vnet26') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet26') + delete_vxlan_tunnel(dvs, tunnel_name) + + # Remove the subnet decap tunnel + vnet_obj.fetch_exist_entries(dvs) + delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET") + vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET") + + ''' + Test 27 - Test for IPv6 vnet tunnel routes with ECMP nexthop group with subnet decap enable + ''' + def test_vnet_orch_27(self, dvs, setup_subnet_decap): + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) + + # Add the subnet decap tunnel + create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe") + vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6", "uniform", "standard", "pipe") + + vnet_obj.fetch_exist_entries(dvs) + tunnel_name = 'tunnel_27' + vnet_name = 'Vnet26' + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3', profile="test_profile") + + 
with pytest.raises(AssertionError): + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "100.100.1.1/32", "10.10.10.0/24") + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + + time.sleep(2) + # subnet decap term should be created as one bfd session state go up + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64") + + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + time.sleep(2) + vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + check_routes_advertisement(dvs, "fd:10:10::1/128", "test_profile") + + # Set all endpoint to down state + update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + time.sleep(2) + + # subnet decap term should be removed as all bfd session states go down + vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64") + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Remove tunnel route + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3']) + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + # Remove the subnet decap tunnel + vnet_obj.fetch_exist_entries(dvs) + delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6") + vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): From 1876a306a7de299839ca49738ba5f56fb32d1ee2 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Fri, 31 May 2024 10:20:15 -0700 Subject: [PATCH 09/14] =?UTF-8?q?Revert=20"Added=20support=20for=20"UNDERL?= =?UTF-8?q?AY=5FSET=5FDSCP"=20and=20"UNDERLAY=5FSET=5FDSCPV6"=20table?= =?UTF-8?q?=E2=80=A6"=20(#3175)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9ffbcd5892653e12873dc58efd6a6d9bf5d9fc1b. 
--- orchagent/aclorch.cpp | 647 ++-------------------------------------- orchagent/aclorch.h | 65 +--- orchagent/acltable.h | 6 +- tests/dvslib/dvs_acl.py | 27 +- tests/test_acl_mark.py | 447 --------------------------- 5 files changed, 23 insertions(+), 1169 deletions(-) delete mode 100644 tests/test_acl_mark.py diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index fd20a212b1..5ad908f082 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -11,6 +11,7 @@ #include "timer.h" #include "crmorch.h" #include "sai_serialize.h" + using namespace std; using namespace swss; @@ -32,10 +33,6 @@ extern string gMySwitchType; #define MIN_VLAN_ID 1 // 0 is a reserved VLAN ID #define MAX_VLAN_ID 4095 // 4096 is a reserved VLAN ID -#define METADATA_VALUE_START 1 -#define METADATA_VALUE_END 7 -#define METADATA_VALUE_INVALID 8 - #define STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY "is_action_list_mandatory" #define STATE_DB_ACL_ACTION_FIELD_ACTION_LIST "action_list" #define STATE_DB_ACL_L3V4V6_SUPPORTED "supported_L3V4V6" @@ -44,8 +41,6 @@ extern string gMySwitchType; #define ACL_COUNTER_DEFAULT_POLLING_INTERVAL_MS 10000 // ms #define ACL_COUNTER_DEFAULT_ENABLED_STATE false -#define EGR_SET_DSCP_TABLE_ID "EgressSetDSCP" - const int TCP_PROTOCOL_NUM = 6; // TCP protocol number acl_rule_attr_lookup_t aclMatchLookup = @@ -78,8 +73,7 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, - { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME}, - { MATCH_METADATA, SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META} + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -128,12 +122,6 @@ static acl_packet_action_lookup_t aclPacketActionLookup = { PACKET_ACTION_DROP, SAI_PACKET_ACTION_DROP }, }; -static acl_rule_attr_lookup_t aclMetadataDscpActionLookup = -{ - { ACTION_META_DATA, SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA}, - { ACTION_DSCP, SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP} -}; - static acl_dtel_flow_op_type_lookup_t aclDTelFlowOpTypeLookup = { { DTEL_FLOW_OP_NOP, SAI_ACL_DTEL_FLOW_OP_NOP }, @@ -361,42 +349,6 @@ static acl_table_action_list_lookup_t defaultAclActionList = } } } - }, - { - // MARK_META - TABLE_TYPE_MARK_META, - { - { - ACL_STAGE_INGRESS, - { - SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA - } - } - } - }, - { - // MARK_METAV6 - TABLE_TYPE_MARK_META_V6, - { - { - ACL_STAGE_INGRESS, - { - SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA - } - } - } - }, - { - // EGR_SET_DSCP - TABLE_TYPE_EGR_SET_DSCP, - { - { - ACL_STAGE_EGRESS, - { - SAI_ACL_ACTION_TYPE_SET_DSCP - } - } - } } }; @@ -462,18 +414,6 @@ static acl_table_match_field_lookup_t stageMandatoryMatchFields = } } } - }, - { - // EGR_SET_DSCP - TABLE_TYPE_EGR_SET_DSCP, - { - { - ACL_STAGE_EGRESS, - { - SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META - } - } - } } }; @@ -766,7 +706,7 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT auto mirrorAction = aclMirrorStageLookup.find(action); auto dtelAction = aclDTelActionLookup.find(action); auto otherAction = aclOtherActionLookup.find(action); - auto metadataAction = aclMetadataDscpActionLookup.find(action); + if (l3Action != aclL3ActionLookup.end()) { saiActionAttr = l3Action->second; @@ -783,10 +723,6 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT { saiActionAttr = 
otherAction->second; } - else if (metadataAction != aclMetadataDscpActionLookup.end()) - { - saiActionAttr = metadataAction->second; - } else { SWSS_LOG_ERROR("Unknown action %s", action.c_str()); @@ -1096,17 +1032,6 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) return false; } } - else if (attr_name == MATCH_METADATA) - { - matchData.data.u8 = to_uint(attr_value); - matchData.mask.u8 = 0xFF; - // value must be between METADATA_VALUE_START and METADATA_VALUE_END inclusive. - if (matchData.data.u8 < METADATA_VALUE_START || matchData.data.u8 > METADATA_VALUE_END) - { - SWSS_LOG_ERROR("Invalid MATCH_METADATA configuration: %s, expected value between 1-7.", attr_value.c_str()); - return false; - } - } } catch (exception &e) { @@ -1674,7 +1599,7 @@ bool AclRule::getCreateCounter() const return m_createCounter; } -shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data, MetaDataMgr * m_metadataMgr) +shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data) { shared_ptr aclRule; @@ -1690,10 +1615,6 @@ shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOr { return make_shared(acl, rule, table); } - else if (acl->isUsingEgrSetDscp(table) || table == EGR_SET_DSCP_TABLE_ID) - { - return make_shared(acl, rule, table, m_metadataMgr); - } else if (aclDTelActionLookup.find(action) != aclDTelActionLookup.cend()) { if (!dtel) @@ -2221,100 +2142,6 @@ void AclRuleMirror::onUpdate(SubjectType type, void *cntx) } } -AclRuleUnderlaySetDscp::AclRuleUnderlaySetDscp(AclOrch *aclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter): - AclRule(aclOrch, rule, table, createCounter), - table_id(table), - m_metaDataMgr(m_metaDataMgr) -{ -} - -uint32_t AclRuleUnderlaySetDscp::getDscpValue() const -{ - return cachedDscpValue; -} - -uint32_t AclRuleUnderlaySetDscp::getMetadata() const -{ - return cachedMetadata; -} - -bool AclRuleUnderlaySetDscp::validateAddAction(string attr_name, string _attr_value) -{ - SWSS_LOG_ENTER(); - - string attr_value = to_upper(_attr_value); - - sai_object_id_t table_oid = m_pAclOrch->getTableById(table_id); - auto aclTable = m_pAclOrch->getTableByOid(table_oid); - string type = aclTable->type.getName(); - string key = table_id + ":" + m_id; - // we handle the allocation of metadata for here. based on SET_DSCP action, we check if a metadata is already allocated then we reuse it - // otherwise we allocate a new metadata. This metadata is then set an the action for the Rule of this table. We also cache the SET_DSCP - // value and the allocated metadata in a the rule structure itself so that when we go to addRule we can use these to add the - // egr_set_dscp rule - if (attr_name == ACTION_DSCP && (type == TABLE_TYPE_MARK_META || type == TABLE_TYPE_MARK_META_V6)) - { - if (!m_pAclOrch->isUsingEgrSetDscp(table_id)) - { - - SWSS_LOG_ERROR("Unexpected Error. 
Table %s not asssociated with EGR_SET_DSCP table", table_id.c_str()); - return false; - } - - u_int8_t actionDscpValue = uint8_t(std::stoi(attr_value)); - cachedDscpValue = actionDscpValue; - auto metadata = m_metaDataMgr->getFreeMetaData(actionDscpValue); - - if (metadata == METADATA_VALUE_INVALID) - { - SWSS_LOG_ERROR("Failed to get free metadata for DSCP value %d", actionDscpValue); - return false; - } - cachedMetadata = metadata; - attr_name = ACTION_META_DATA; - attr_value = std::to_string(metadata); - m_pAclOrch->addMetaDataRef(key, metadata); - } - - - sai_acl_action_data_t actionData; - actionData.parameter.u32 = 0; - - SWSS_LOG_INFO("attr_name: %s, attr_value: %s int val %d", attr_name.c_str(), attr_value.c_str(), to_uint(attr_value)); - // we only handle DSCP and META_DATA actions for now. - if (attr_name == ACTION_DSCP || attr_name == ACTION_META_DATA) - { - actionData.parameter.u32 = to_uint(attr_value); - if (attr_name == ACTION_META_DATA && (actionData.parameter.u32 < METADATA_VALUE_START || actionData.parameter.u32 > METADATA_VALUE_END)) - { - return false; - } - } - else - { - return false; - } - - actionData.enable = true; - return setAction(aclMetadataDscpActionLookup[attr_name], actionData); -} - -bool AclRuleUnderlaySetDscp::validate() -{ - SWSS_LOG_ENTER(); - if ( m_actions.size() != 1) - { - return false; - } - - return true; -} - -void AclRuleUnderlaySetDscp::onUpdate(SubjectType, void *) -{ - // Do nothing -} - AclTable::AclTable(AclOrch *pAclOrch, string id) noexcept : m_pAclOrch(pAclOrch), id(id) { @@ -3577,40 +3404,6 @@ void AclOrch::initDefaultTableTypes() ); } - addAclTableType( - builder.withName(TABLE_TYPE_MARK_META) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) - .build() - ); - - addAclTableType( - builder.withName(TABLE_TYPE_MARK_META_V6) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) - .build() - ); - - addAclTableType( - builder.withName(TABLE_TYPE_EGR_SET_DSCP) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META)) - .build() - ); // Placeholder for control plane tables addAclTableType(builder.withName(TABLE_TYPE_CTRLPLANE).build()); } @@ -3709,7 +3502,7 @@ void AclOrch::putAclActionCapabilityInDB(acl_stage_type_t stage) ostringstream acl_action_value_stream; ostringstream is_action_list_mandatory_stream; - for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup, aclMetadataDscpActionLookup}) + for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup}) { for (const auto& it: 
action_map) { @@ -3949,26 +3742,6 @@ void AclOrch::getAddDeletePorts(AclTable &newT, newPortSet.insert(p); } - // if the table type is TABLE_TYPE_EGR_SET_DSCP we use a single instance of this - // table with all the tables of type TABLE_TYPE_MARK_META/v6 therefoere we need to - // to collect all the ports from the tables of type TABLE_TYPE_MARK_META/v6 and - // put them in the newPortSet. - if (curT.id == EGR_SET_DSCP_TABLE_ID) - { - for(auto iter : m_egrSetDscpRef) - { - auto tableOid = getTableById(iter); - auto existingtable = m_AclTables.at(tableOid); - for (auto p : existingtable.pendingPortSet) - { - newPortSet.insert(p); - } - for (auto p : existingtable.portSet) - { - newPortSet.insert(p); - } - } - } // Collect current ports for (auto p : curT.pendingPortSet) { @@ -4075,176 +3848,6 @@ bool AclOrch::updateAclTable(AclTable ¤tTable, AclTable &newTable) return true; } -EgressSetDscpTableStatus AclOrch::addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName) -{ - SWSS_LOG_ENTER(); - EgressSetDscpTableStatus status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED; - AclTable egrSetDscpTable(this); - // we only add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 - // otherwise we return EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. - if (orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP || orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) - { - - AclTable egrSetDscpTable(this); - - // copy ports from the TABLE_TYPE_UNDERLAY_SET_DSCP/v6 to the egrSetDscpTable. - std::set ports; - ports.insert(table.portSet.begin(), table.portSet.end()); - ports.insert(table.pendingPortSet.begin(), table.pendingPortSet.end()); - for (auto alias : ports) - { - Port port; - if (!gPortsOrch->getPort(alias, port)) - { - SWSS_LOG_INFO("Add unready port %s to pending list for ACL table %s", - alias.c_str(), EGR_SET_DSCP_TABLE_ID); - egrSetDscpTable.pendingPortSet.emplace(alias); - continue; - } - - sai_object_id_t bind_port_id; - if (!getAclBindPortId(port, bind_port_id)) - { - SWSS_LOG_ERROR("Failed to get port %s bind port ID for ACL table %s", - alias.c_str(), EGR_SET_DSCP_TABLE_ID); - return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; - } - egrSetDscpTable.link(bind_port_id); - egrSetDscpTable.portSet.emplace(alias); - } - - egrSetDscpTable.id = EGR_SET_DSCP_TABLE_ID; - egrSetDscpTable.stage = ACL_STAGE_EGRESS; - auto egrSetDscpTableType = getAclTableType(TABLE_TYPE_EGR_SET_DSCP); - sai_object_id_t egrSetDscp_oid = getTableById(EGR_SET_DSCP_TABLE_ID); - // create the EGR_SET_DSCP fisrt time if not present. Otherwise update the existing table. - if (m_egrSetDscpRef.empty()) - { - // Create EGR_SET_DSCP table - egrSetDscpTable.validateAddType(*egrSetDscpTableType); - egrSetDscpTable.addMandatoryActions(); - if (!egrSetDscpTable.validate()) - { - SWSS_LOG_ERROR("Failed to validate ACL table %s", - EGR_SET_DSCP_TABLE_ID); - // since we failed to create the table, there is no need for rollback. - return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED;; - } - if (!addAclTable(egrSetDscpTable)) - { - SWSS_LOG_ERROR("Failed to create ACL table EgressSetDSCP"); - // since we failed to create the table, there is no need for rollback. 
- return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; - } - } - else - { - if (updateAclTable(m_AclTables[egrSetDscp_oid], egrSetDscpTable)) - { - SWSS_LOG_INFO("Successfully updated existing ACL table EgressSetDSCP"); - // We do not set the status here as we still have to update - // TABLE_TYPE_MARK_META/V6 table. - } - else - { - SWSS_LOG_ERROR("Failed to update existing ACL table EgressSetDSCP"); - // there is no need for roollback as we have not made any changes to the MARK_META/V6 tables. - // We can simply return false. - return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; - } - } - // keep track of the fact that this table is now associated with the EGR_SET_DSCP table. - m_egrSetDscpRef.insert(table_id); - SWSS_LOG_INFO("Added ACL table %s to EgrSetDscpRef", table_id.c_str()); - status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS; - - } - return status; -} - -bool AclOrch::removeEgrSetDscpTable(string table_id) -{ - m_egrSetDscpRef.erase(table_id); - if (m_egrSetDscpRef.size() == 0) - { - if (!removeAclTable(EGR_SET_DSCP_TABLE_ID)) - { - SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); - return false; - } - } - else - { - //create a dummy table with no ports. The updateAclTable will remove the - // unique ports which were associated with table_id. - // The way this works is as follows. - // The getAddDeletePorts function collects all the ports of the tables which - // are in m_egrSetDscpRef set and adds those ports to the EGR_SET_DSCP. - // As a result the EGR_SET_DSCP is associated with all the ports to which the - // TABLE_TYPE_UNDERLAY_SET_DSCP/V6 tables are attached. - // - // when we want to remove one of the tables referencing the EGR_SET_DSCP. - // we remove it from m_egrSetDscpRef, then send a updateAclTable with a - // EGR_SET_DSCP table with no assiciated ports. - // The getAddDeletePorts collects all the ports except for the one assocated - // with the table we just removed from m_egrSetDscpRef and updated the EGR_SET_DSCP - // with new port set. - AclTable dummyTable(this); - dummyTable.id = EGR_SET_DSCP_TABLE_ID; - dummyTable.stage = ACL_STAGE_EGRESS; - if (updateAclTable(EGR_SET_DSCP_TABLE_ID, dummyTable, "")) - { - SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); - return false; - } - } - return true; -} - -bool AclOrch::addEgrSetDscpRule(string key, string dscpAction) -{ - auto metadata = m_egrDscpRuleMetadata[key]; - - if (m_metadataEgrDscpRule[metadata].size() == 1) - { - // Create EGR_SET_DSCP rule. set the match criteria to metadata value and action to dscpAction. 
- auto egrSetDscpRule = make_shared(this, std::to_string(metadata), EGR_SET_DSCP_TABLE_ID, &m_metaDataMgr); - egrSetDscpRule->validateAddMatch(MATCH_METADATA, std::to_string(metadata)); - egrSetDscpRule->validateAddAction(ACTION_DSCP, dscpAction); - - if (egrSetDscpRule->validate()) - { - if (!addAclRule(egrSetDscpRule, EGR_SET_DSCP_TABLE_ID)) - { - SWSS_LOG_ERROR("Failed to create ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); - return false; - } - } - else - { - SWSS_LOG_ERROR("Failed to validate ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); - return false; - } - } - return true; -} - -bool AclOrch::removeEgrSetDscpRule(string key) -{ - auto metadata = m_egrDscpRuleMetadata[key]; - if (getMetaDataRefCount(metadata) == 1) - { - if(!removeAclRule(EGR_SET_DSCP_TABLE_ID, std::to_string(metadata))) - { - SWSS_LOG_ERROR("Failed to remove ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); - return false; - } - } - removeMetaDataRef(key, metadata); - m_metaDataMgr.recycleMetaData(metadata); - return true; -} - bool AclOrch::updateAclTable(string table_id, AclTable &table) { SWSS_LOG_ENTER(); @@ -4264,29 +3867,6 @@ bool AclOrch::updateAclTable(string table_id, AclTable &table) return true; } -bool AclOrch::updateAclTable(string table_id, AclTable &table, string orignalTableTypeName) -{ - SWSS_LOG_ENTER(); - // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 - // for other tables it simply retuns EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED - EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, table, orignalTableTypeName); - bool status = false; - if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED) - { - return false; - } - status = updateAclTable(table_id,table); - // if we have not updated the EGR_SET_DSCP, we simply need to return the status. - // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update - // of the MARK_META table failed. - if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) - { - // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. - SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); - removeEgrSetDscpTable(table_id); - } - return status; -} bool AclOrch::addAclTable(AclTable &newTable) { @@ -4403,30 +3983,6 @@ bool AclOrch::addAclTable(AclTable &newTable) } } -bool AclOrch::addAclTable(string table_id, AclTable &newTable, string orignalTableTypeName) -{ - SWSS_LOG_ENTER(); - // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP - // or TABLE_TYPE_UNDERLAY_SET_DSCPV6. For other tables it simply retuns EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. - EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, newTable, orignalTableTypeName); - bool status = false; - if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED) - { - return false; - } - status = addAclTable(newTable); - // if we have not updated the EGR_SET_DSCP, we simply need to return the status. - // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update - // of the MARK_META table failed. 
- if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) - { - // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. - SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); - removeEgrSetDscpTable(table_id); - } - return status; -} - bool AclOrch::removeAclTable(string table_id) { SWSS_LOG_ENTER(); @@ -4485,20 +4041,6 @@ bool AclOrch::removeAclTable(string table_id) } } -bool AclOrch::removeAclTableWithEgrDscp(string table_id) -{ - SWSS_LOG_ENTER(); - bool egrSetDscpStatus = true; - if(m_egrSetDscpRef.find(table_id) != m_egrSetDscpRef.end()) - { - egrSetDscpStatus = removeEgrSetDscpTable(table_id); - } - if (!egrSetDscpStatus) - { - return false; - } - return removeAclTable(table_id); -} bool AclOrch::addAclTableType(const AclTableType& tableType) { SWSS_LOG_ENTER(); @@ -4559,34 +4101,6 @@ bool AclOrch::addAclRule(shared_ptr newRule, string table_id) return true; } -bool AclOrch::addAclRuleWithEgrSetDscp(shared_ptr newRule, string table_id) -{ - SWSS_LOG_ENTER(); - bool needsEgrSetDscp = false; - string key = table_id + ":" + newRule->getId(); - // if the table is using EGR_SET_DSCP, we need to add the EGR_SET_DSCP rule. - if (isUsingEgrSetDscp(table_id)) - { - needsEgrSetDscp = true; - string dscpAction = std::to_string(std::static_pointer_cast(newRule)->getDscpValue()); - if (!addEgrSetDscpRule(key, dscpAction)) - { - SWSS_LOG_ERROR("Failed to add Egress Set Dscp rule for Rule %s in table %s.", - newRule->getId().c_str(), table_id.c_str()); - return false; - } - } - // add the regular rule. - bool status = addAclRule(newRule, table_id); - if(!status && needsEgrSetDscp) - { - removeEgrSetDscpRule(key); - return false; - } - - return status; -} - bool AclOrch::removeAclRule(string table_id, string rule_id) { sai_object_id_t table_oid = getTableById(table_id); @@ -4612,19 +4126,6 @@ bool AclOrch::removeAclRule(string table_id, string rule_id) return m_AclTables[table_oid].remove(rule_id); } -bool AclOrch::removeAclRuleWithEgrSetDscp(string table_id, string rule_id) -{ - string key = table_id + ":" + rule_id; - if (m_egrDscpRuleMetadata.find(key) != m_egrDscpRuleMetadata.end()) - { - if (!removeEgrSetDscpRule(key)) - { - return false; - } - } - return removeAclRule(table_id, rule_id); -} - AclRule* AclOrch::getAclRule(string table_id, string rule_id) { sai_object_id_t table_oid = getTableById(table_id); @@ -4858,56 +4359,6 @@ bool AclOrch::isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_ac return it->second.find(param.s32) != it->second.cend(); } -bool AclOrch::isUsingEgrSetDscp(const string& table) const -{ - if (m_egrSetDscpRef.find(table) != m_egrSetDscpRef.end()) - { - return true; - } - return false; -} - -string AclOrch::translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const -{ - // The TABLE_TYPE_UNDERLAY_SET_DSCP/V6 is translated to table translates into TABLE_TYPE_MARK_META/V6 - if (tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP) - { - return TABLE_TYPE_MARK_META; - } - else if(tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) - { - return TABLE_TYPE_MARK_META_V6; - } - return tableTypeName; -} - -void AclOrch::addMetaDataRef(string key, uint8_t metadata) -{ - m_egrDscpRuleMetadata[key] = metadata; - if (m_metadataEgrDscpRule.find(metadata) == m_metadataEgrDscpRule.end()) - { - m_metadataEgrDscpRule[metadata] = set(); - } - m_metadataEgrDscpRule[metadata].insert(key); - -} - -void 
AclOrch::removeMetaDataRef(string key, uint8_t metadata) -{ - m_metadataEgrDscpRule[metadata].erase(key); - m_egrDscpRuleMetadata.erase(key); -} - -uint32_t AclOrch::getMetaDataRefCount(uint8_t metadata) -{ - if (m_metadataEgrDscpRule.find(metadata) != m_metadataEgrDscpRule.end()) - { - return uint32_t(m_metadataEgrDscpRule[metadata].size()); - } - return 0; -} - - void AclOrch::doAclTableTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4984,14 +4435,6 @@ void AclOrch::doAclTableTask(Consumer &consumer) break; } } - // For the case of Table type TABLE_TYPE_UNDERLAY_SET_DSCP/V6 we need to translate - // it to TABLE_TYPE_MARK_META/V6. We retain the original table type name in orignalTableTypeName - // and pass it ot the updateAclTable/ addAclTable functions. There based on the orignalTableTypeName - // we create/update the EgrSetDscp table. - string firstTableTypeName; - string unused; - string orignalTableTypeName = tableTypeName; - tableTypeName = translateUnderlaySetDscpTableTypeName(tableTypeName); auto tableType = getAclTableType(tableTypeName); if (!tableType) @@ -5018,7 +4461,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) m_AclTables[table_oid])) { // Update the existing table using the info in newTable - if (updateAclTable(table_id, newTable, orignalTableTypeName)) + if (updateAclTable(m_AclTables[table_oid], newTable)) { SWSS_LOG_NOTICE("Successfully updated existing ACL table %s", table_id.c_str()); @@ -5035,7 +4478,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { - if (addAclTable(table_id, newTable, orignalTableTypeName)) + if (addAclTable(newTable)) { // Mark ACL table as ACTIVE setAclTableStatus(table_id, AclObjectStatus::ACTIVE); @@ -5043,7 +4486,6 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { - //we have failed to create the MarkMeta table, we need to remove the EgrSetDscp table setAclTableStatus(table_id, AclObjectStatus::PENDING_CREATION); it++; } @@ -5060,7 +4502,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else if (op == DEL_COMMAND) { - if (removeAclTableWithEgrDscp(table_id)) + if (removeAclTable(table_id)) { // Remove ACL table status from STATE_DB removeAclTableStatus(table_id); @@ -5139,7 +4581,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) try { - newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t, &m_metaDataMgr); + newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t); } catch (exception &e) { @@ -5216,19 +4658,20 @@ void AclOrch::doAclRuleTask(Consumer &consumer) SWSS_LOG_ERROR("Failed to add attribute '%s : %s'", attr_name.c_str(), attr_value.c_str()); } } + if (bHasIPV4 && bHasIPV6) - { - if (type == TABLE_TYPE_L3V4V6) - { - SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); - bAllAttributesOk = false; - } - } + { + if (type == TABLE_TYPE_L3V4V6) + { + SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); + bAllAttributesOk = false; + } + } // validate and create ACL rule if (bAllAttributesOk && newRule->validate()) { - if (addAclRuleWithEgrSetDscp(newRule, table_id)) + if (addAclRule(newRule, table_id)) { setAclRuleStatus(table_id, rule_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); @@ -5249,7 +4692,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } else if (op == DEL_COMMAND) { - if (removeAclRuleWithEgrSetDscp(table_id, rule_id)) + if (removeAclRule(table_id, rule_id)) { removeAclRuleStatus(table_id, rule_id); it = 
consumer.m_toSync.erase(it); @@ -5669,55 +5112,3 @@ void AclOrch::removeAllAclRuleStatus() } } -MetaDataMgr::MetaDataMgr() -{ - for (uint8_t i = METADATA_VALUE_START; i <= METADATA_VALUE_END; i++) - { - m_freeMetadata.push_back(i); - } -} -uint8_t MetaDataMgr::getFreeMetaData(uint8_t dscp) -{ - uint8_t metadata =METADATA_VALUE_INVALID; - if (m_dscpMetadata.find(dscp) != m_dscpMetadata.end()) - { - // dscp value has a metadata value assigned to it. - metadata = m_dscpMetadata[dscp]; - } - else - { - if (m_freeMetadata.empty()) - { - SWSS_LOG_ERROR("Metadata Value not available for allocation."); - return metadata; - } - metadata = m_freeMetadata.front(); - m_freeMetadata.erase(m_freeMetadata.begin()); - m_dscpMetadata[dscp] = metadata; - } - m_MetadataRef[metadata] += 1; - return metadata; -} - -void MetaDataMgr::recycleMetaData(uint8_t metadata) -{ - m_MetadataRef[metadata] -= 1; - if (m_MetadataRef[metadata] == 0) - { - - for (auto iter = m_dscpMetadata.begin(); iter != m_dscpMetadata.end();) - { - if ( iter->second == metadata) - { - m_dscpMetadata.erase(iter++); - m_freeMetadata.push_front(metadata); - break; - } - else - { - ++iter; - } - } - } -} - diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 1c0490d862..abeaf519e2 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -51,7 +51,6 @@ #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" #define MATCH_BTH_OPCODE "BTH_OPCODE" #define MATCH_AETH_SYNDROME "AETH_SYNDROME" -#define MATCH_METADATA "META_DATA" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" @@ -69,8 +68,6 @@ #define ACTION_DTEL_FLOW_SAMPLE_PERCENT "FLOW_SAMPLE_PERCENT" #define ACTION_DTEL_REPORT_ALL_PACKETS "REPORT_ALL_PACKETS" #define ACTION_COUNTER "COUNTER" -#define ACTION_META_DATA "META_DATA_ACTION" -#define ACTION_DSCP "DSCP_ACTION" #define PACKET_ACTION_FORWARD "FORWARD" #define PACKET_ACTION_DROP "DROP" @@ -112,13 +109,6 @@ enum AclObjectStatus PENDING_REMOVAL }; -enum EgressSetDscpTableStatus -{ - EGRESS_SET_DSCP_TABLE_FAILED = 0, - EGRESS_SET_DSCP_TABLE_SUCCESS, - EGRESS_SET_DSCP_TABLE_NOT_REQUIRED, -}; - struct AclActionCapabilities { set actionList; @@ -175,20 +165,6 @@ class AclTableRangeMatch: public AclTableMatchInterface private: vector m_rangeList; }; - -class MetaDataMgr -{ -public: - MetaDataMgr(); - uint8_t getFreeMetaData(uint8_t dscp); - void recycleMetaData(uint8_t metadata); - -private: - list m_freeMetadata; - map m_dscpMetadata; - map m_MetadataRef; -}; - class AclTableType { public: @@ -302,7 +278,7 @@ class AclRule bool getCreateCounter() const; const vector& getRangeConfig() const; - static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&, MetaDataMgr * m_metadataMgr); + static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&); virtual ~AclRule() {} protected: @@ -401,23 +377,6 @@ class AclRuleDTelWatchListEntry: public AclRule bool INT_session_valid; }; -class AclRuleUnderlaySetDscp: public AclRule -{ -public: - AclRuleUnderlaySetDscp(AclOrch *m_pAclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter = true); - - bool validateAddAction(string attr_name, string attr_value); - bool validate(); - void onUpdate(SubjectType, void *) override; - uint32_t getDscpValue() const; - uint32_t getMetadata() const; -protected: - uint32_t cachedDscpValue; - uint32_t cachedMetadata; - string 
table_id; - MetaDataMgr* m_metaDataMgr; -}; - class AclTable { public: @@ -528,17 +487,6 @@ class AclOrch : public Orch, public Observer bool addAclTable(AclTable &aclTable); bool removeAclTable(string table_id); - bool addAclTable(string table_id, AclTable &aclTable, string orignalTableTypeName); - bool removeAclTableWithEgrDscp(string table_id); - bool updateAclTable(string table_id, AclTable &table, string orignalTableTypeName); - EgressSetDscpTableStatus addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName); - - bool removeEgrSetDscpTable(string table_id); - bool addEgrSetDscpRule(string key, string dscpAction); - bool removeEgrSetDscpRule(string key); - bool addAclRuleWithEgrSetDscp(shared_ptr aclRule, string table_id); - bool removeAclRuleWithEgrSetDscp(string table_id, string rule_id); - bool addAclTableType(const AclTableType& tableType); bool removeAclTableType(const string& tableTypeName); bool updateAclTable(AclTable ¤tTable, AclTable &newTable); @@ -558,12 +506,6 @@ class AclOrch : public Orch, public Observer bool isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) const; bool isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const; bool isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_acl_action_parameter_t param) const; - bool isUsingEgrSetDscp(const string& table) const; - string translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const; - - void addMetaDataRef(string key, uint8_t metadata); - void removeMetaDataRef(string key, uint8_t metadata); - uint32_t getMetaDataRefCount(uint8_t metadata); bool m_isCombinedMirrorV6Table = true; map m_mirrorTableCapabilities; @@ -644,12 +586,9 @@ class AclOrch : public Orch, public Observer Table m_aclTableStateTable; Table m_aclRuleStateTable; - MetaDataMgr m_metaDataMgr; map m_mirrorTableId; map m_mirrorV6TableId; - set m_egrSetDscpRef; - map> m_metadataEgrDscpRule; - map m_egrDscpRuleMetadata; + acl_capabilities_t m_aclCapabilities; acl_action_enum_values_capabilities_t m_aclEnumActionCapabilities; FlexCounterManager m_flex_counter_manager; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 7c4ff86813..1b1cdeb29a 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -35,11 +35,7 @@ extern "C" { #define TABLE_TYPE_MCLAG "MCLAG" #define TABLE_TYPE_MUX "MUX" #define TABLE_TYPE_DROP "DROP" -#define TABLE_TYPE_MARK_META "MARK_META" -#define TABLE_TYPE_MARK_META_V6 "MARK_METAV6" -#define TABLE_TYPE_EGR_SET_DSCP "EGR_SET_DSCP" -#define TABLE_TYPE_UNDERLAY_SET_DSCP "UNDERLAY_SET_DSCP" -#define TABLE_TYPE_UNDERLAY_SET_DSCPV6 "UNDERLAY_SET_DSCPV6" + typedef enum { ACL_STAGE_UNKNOWN, diff --git a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index dc338ce9f1..236ccaa0fc 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -331,32 +331,7 @@ def verify_acl_table_action_list( for action in expected_action_list: assert action in action_list - def create_dscp_acl_rule( - self, - table_name: str, - rule_name: str, - qualifiers: Dict[str, str], - action: str, - priority: str = "2020" - ) -> None: - """Create a new DSCP ACL rule in the given table. - - Args: - table_name: The name of the ACL table to add the rule to. - rule_name: The name of the ACL rule. - qualifiers: The list of qualifiers to add to the rule. - action: DSCP value. - priority: The priority of the rule. 
- """ - fvs = { - "priority": priority, - "DSCP_ACTION": action - } - - for k, v in qualifiers.items(): - fvs[k] = v - self.config_db.create_entry("ACL_RULE", "{}|{}".format(table_name, rule_name), fvs) - + def create_acl_rule( self, table_name: str, diff --git a/tests/test_acl_mark.py b/tests/test_acl_mark.py deleted file mode 100644 index be09e7df8e..0000000000 --- a/tests/test_acl_mark.py +++ /dev/null @@ -1,447 +0,0 @@ -import pytest -from requests import request - -OVERLAY_TABLE_TYPE = "UNDERLAY_SET_DSCP" -OVERLAY_TABLE_NAME = "OVERLAY_MARK_META_TEST" -OVERLAY_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] -OVERLAY_RULE_NAME = "OVERLAY_TEST_RULE" - -OVERLAY_TABLE_TYPE6 = "UNDERLAY_SET_DSCPV6" -OVERLAY_TABLE_NAME6 = "OVERLAY_MARK_META_TEST6" -OVERLAY_BIND_PORTS6 = ["Ethernet20", "Ethernet24", "Ethernet28", "Ethernet32"] -OVERLAY_RULE_NAME6 = "OVERLAY_TEST_RULE6" - -# tests for UNDERLAY_SET_DSCP table - - -class TestAclMarkMeta: - @pytest.fixture - def overlay_acl_table(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, - OVERLAY_TABLE_TYPE, - OVERLAY_BIND_PORTS) - yield dvs_acl.get_acl_table_ids(2) - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) - dvs_acl.verify_acl_table_count(0) - - @pytest.fixture - def overlay6_acl_table(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, - OVERLAY_TABLE_TYPE6, - OVERLAY_BIND_PORTS6) - yield dvs_acl.get_acl_table_ids(2) - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) - dvs_acl.verify_acl_table_count(0) - - def verify_acl_table_group_members_multitable(self, dvs_acl, acl_table_id, acl_table_group_ids, member_count): - members = dvs_acl.asic_db.wait_for_n_keys(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, - member_count) - - member_groups = [] - table_member_map = {} - for member in members: - fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, member) - group_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID") - table_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID") - - if group_id in acl_table_group_ids and table_id in acl_table_id: - member_groups.append(group_id) - if table_id not in table_member_map: - table_member_map[table_id] = [] - table_member_map[table_id].append(group_id) - - assert set(member_groups) == set(acl_table_group_ids) - return table_member_map - - def get_table_stage(self, dvs_acl, acl_table_id, v4_ports, v6_ports): - stages = [] - names = [] - ports = [] - for table in acl_table_id: - fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_TABLE_NAME, table) - stage = fvs.get("SAI_ACL_TABLE_ATTR_ACL_STAGE") - if stage == "SAI_ACL_STAGE_INGRESS": - stages.append("ingress") - elif stage == "SAI_ACL_STAGE_EGRESS": - stages.append("egress") - qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META") - if qual == "true": - names.append("EGR_SET_DSCP") - ports.append(v4_ports+v6_ports) - qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6") - if qual == "true": - names.append("MARK_META6") - ports.append(v6_ports) - qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IP") - if qual == "true": - names.append("MARK_META") - ports.append(v4_ports) - return stages, names, ports - - def verify_acl_table_port_binding_multi(self, dvs_acl, table_member_map, bind_ports, stages, acl_table_id): - for i in range(0, len(stages)): - stage = stages[i] - table = acl_table_id[i] - port_groups = [] - for port in bind_ports[i]: - port_oid = dvs_acl.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "").get(port) - fvs = 
dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - acl_table_group_id = fvs.pop(dvs_acl.ADB_PORT_ATTR_LOOKUP[stage], None) - assert acl_table_group_id in table_member_map[table] - port_groups.append(acl_table_group_id) - - assert len(port_groups) == len(bind_ports[i]) - assert set(port_groups) == set(table_member_map[table]) - - - def get_acl_rules_with_action(self, dvs_acl, total_rules): - """Verify that there are N rules in the ASIC DB.""" - members = dvs_acl.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", - total_rules) - - member_groups = [] - table_member_map = {} - for member in members: - fvs = dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", member) - table_id = fvs.get("SAI_ACL_ENTRY_ATTR_TABLE_ID") - entry = {} - entry['id'] = member - action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP") - if action: - entry['action_type'] = "dscp" - entry['action_value'] = action - meta = fvs.get("SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META") - entry['match_meta'] = meta.split('&')[0] - action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA") - if action: - entry['action_type'] = "meta" - entry['action_value'] = action - - if table_id not in table_member_map: - table_member_map[table_id] = [] - table_member_map[table_id].append(entry) - return table_member_map - - def verify_acl_rules_with_action(self, table_names, acl_table_id, table_rules, meta, dscp): - for i in range(0, len(table_names)): - if acl_table_id[i] in table_rules: - for j in range(0, len(table_rules[acl_table_id[i]])): - if table_names[i] == "MARK_META" or table_names[i] == "MARK_META6": - assert table_rules[acl_table_id[i]][j]['action_type'] == "meta" - assert table_rules[acl_table_id[i]][j]['action_value'] in meta - else: - assert table_rules[acl_table_id[i]][j]['action_type'] == "dscp" - assert table_rules[acl_table_id[i]][j]['action_value'] in dscp - assert table_rules[acl_table_id[i]][j]['match_meta'] in meta - - def test_OverlayTableCreationDeletion(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) - # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. - acl_table_id = dvs_acl.get_acl_table_ids(2) - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, OVERLAY_BIND_PORTS, []) - - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) - - self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) - - # Verify status is written into STATE_DB - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) - dvs_acl.verify_acl_table_count(0) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) - - def test_Overlay6TableCreationDeletion(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) - # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. 
- acl_table_id = dvs_acl.get_acl_table_ids(2) - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) - - self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) - - # Verify status is written into STATE_DB - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) - dvs_acl.verify_acl_table_count(0) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) - - def test_OverlayBothv4v6TableCreationDeletion(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) - # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. - acl_table_id = dvs_acl.get_acl_table_ids(3) - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS6) - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*4) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 16) - - self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) - - # Verify status is written into STATE_DB - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) - dvs_acl.verify_acl_table_count(2) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) - acl_table_id = dvs_acl.get_acl_table_ids(2) - - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) - - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) - dvs_acl.verify_acl_table_count(0) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) - - def test_OverlayBothv4v6TableSameintfCreationDeletion(self, dvs_acl): - try: - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS) - # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. 
- acl_table_id = dvs_acl.get_acl_table_ids(3) - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS) - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 12) - - self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) - - # Verify status is written into STATE_DB - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") - finally: - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) - dvs_acl.verify_acl_table_count(2) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) - acl_table_id = dvs_acl.get_acl_table_ids(2) - - stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) - table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) - - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) - dvs_acl.verify_acl_table_count(0) - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) - - def test_OverlayEntryCreationDeletion(self, dvs_acl, overlay_acl_table): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32"} - acl_table_id = dvs_acl.get_acl_table_ids(2) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE", config_qualifiers,action="12") - # Verify status is written into STATE_DB - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", "Active") - table_rules = self.get_acl_rules_with_action(dvs_acl, 2) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE") - # Verify the STATE_DB entry is removed - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", None) - dvs_acl.verify_no_acl_rules() - - def test_OverlayEntryMultiRuleRef(self, dvs_acl, overlay_acl_table): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32", - "DSCP": "1" - } - acl_table_id = dvs_acl.get_acl_table_ids(2) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - #create 1st Rule - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") - #create 2nd Rule - config_qualifiers["DSCP"] = "2" - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="12") - #create 3rd Rule - config_qualifiers["DSCP"] = "3" - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="12") - - #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP - # Verify status is written into STATE_DB - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") - table_rules = self.get_acl_rules_with_action(dvs_acl, 4) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - - # remove first rule. 
We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) - table_rules = self.get_acl_rules_with_action(dvs_acl, 3) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) - - # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) - table_rules = self.get_acl_rules_with_action(dvs_acl, 2) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) - - # Verify the STATE_DB entry is removed - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) - - dvs_acl.verify_no_acl_rules() - - def test_OverlayEntryMultiTableRules(self, dvs_acl): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32", - "DSCP": "1"} - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, - OVERLAY_TABLE_TYPE, - OVERLAY_BIND_PORTS) - dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, - OVERLAY_TABLE_TYPE6, - OVERLAY_BIND_PORTS6) - acl_table_id = dvs_acl.get_acl_table_ids(3) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - #create 1st Rule - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") - - #create 2nd Rule ipv6 - config_qualifiers6 = {"SRC_IPV6": "2777::0/64", - "DST_IPV6": "2788::0/64", - "DSCP" : "1"}; - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME6, "1", config_qualifiers6, action="12") - - # Verify status of both rules. - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", "Active") - table_rules = self.get_acl_rules_with_action(dvs_acl, 3) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - - # remove first rule. We should still have 1 rule, 1 for MARK_META - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME6, "1") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", None) - table_rules = self.get_acl_rules_with_action(dvs_acl, 2) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") - - # remove 2nd rule. 
We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) - dvs_acl.verify_no_acl_rules() - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) - dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) - dvs_acl.verify_acl_table_count(0) - - def test_OverlayEntryMultiMetaRule(self, dvs_acl, overlay_acl_table): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32", - "DSCP": "1" - } - - acl_table_id = dvs_acl.get_acl_table_ids(2) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - #create 1st Rule - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") - #create 2nd Rule - config_qualifiers["DSCP"] = "2" - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="13") - #create 3rd Rule - config_qualifiers["DSCP"] = "3" - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="14") - - #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP - # Verify status is written into STATE_DB - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") - table_rules = self.get_acl_rules_with_action(dvs_acl, 6) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) - - # remove first rule. We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) - table_rules = self.get_acl_rules_with_action(dvs_acl, 4) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) - - # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) - table_rules = self.get_acl_rules_with_action(dvs_acl, 2) - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) - - # Verify the STATE_DB entry is removed - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) - - dvs_acl.verify_no_acl_rules() - - def test_OverlayEntryExhaustMeta(self, dvs_acl, overlay_acl_table): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32", - "DSCP": "1" - } - acl_table_id = dvs_acl.get_acl_table_ids(2) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - #create 8 rules. 8th one should fail. 
- for i in range(1, 9): - config_qualifiers["DSCP"] = str(i) - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) - if i < 8: - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") - else: - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) - - table_rules = self.get_acl_rules_with_action(dvs_acl, 14) - meta = [str(i) for i in range(1, 8)] - dscps = [str(i) for i in range(11, 18)] - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) - - for i in range(1, 9): - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) - dvs_acl.verify_no_acl_rules() - - def test_OverlayEntryTestMetaDataMgr(self, dvs_acl, overlay_acl_table): - # allocate all 7 metadata values and free them multiple times. - # At the end there should be no rules allocated. - for i in range(1, 4): - config_qualifiers = {"DST_IP": "20.0.0.1/32", - "SRC_IP": "10.0.0.0/32", - "DSCP": "1" - } - acl_table_id = dvs_acl.get_acl_table_ids(2) - _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) - #create 8 rules. 8th one should fail. - for i in range(1, 9): - config_qualifiers["DSCP"] = str(i) - dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) - if i < 8: - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") - else: - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) - - table_rules = self.get_acl_rules_with_action(dvs_acl, 14) - meta = [str(i) for i in range(1, 8)] - dscps = [str(i) for i in range(11, 18)] - self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) - - for i in range(1, 9): - dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) - dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) - dvs_acl.verify_no_acl_rules() - - # Add Dummy always-pass test at end as workaroud -# for issue when Flaky fail on final test it invokes module tear-down before retrying -def test_nonflaky_dummy(): - pass From 6568193c7a9f73cb5c5f2aaa295baa7a95f3b3e8 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sat, 1 Jun 2024 01:23:10 +0800 Subject: [PATCH 10/14] Do not apply QoS mapping item on the switch until the object is created (#3163) What I did Do not apply the global DSCP to TC map to the switch object until the mapping object has been created. Why I did it Fix issue: if orchagent handles the tables in the following order, it will fail at step 1 and the configuration will never be applied:
PORT_QOS_MAP|global object and then DSCP_TO_TC object --- orchagent/qosorch.cpp | 1 + tests/mock_tests/qosorch_ut.cpp | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 90fc6fc766..21cb11c5db 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -2018,6 +2018,7 @@ task_process_status QosOrch::handleGlobalQosMap(const string &OP, KeyOpFieldsVal { SWSS_LOG_INFO("Global QoS map %s is not yet created", map_name.c_str()); task_status = task_process_status::task_need_retry; + continue; } if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, id)) diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 50aae599bf..0cdda7812d 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -1167,6 +1167,7 @@ namespace qosorch_test static_cast(gQosOrch)->doTask(); // Check DSCP_TO_TC_MAP|AZURE is applied to switch ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); // Remove global DSCP_TO_TC_MAP entries.push_back({"global", "DEL", {}}); @@ -1189,7 +1190,37 @@ namespace qosorch_test // Check DSCP_TO_TC_MAP|AZURE is removed, and the switch_level dscp_to_tc_map is set to NULL ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + + // Run the test in reverse order + entries.push_back({"global", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Try draining PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Try draining DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); } TEST_F(QosOrchTest, QosOrchTestRetryFirstItem) From 98012ed411d4e83ac01dabbb6d03e4f122ad1545 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic <26731235+Ndancejic@users.noreply.github.com> Date: Fri, 31 May 2024 11:12:41 -0700 Subject: [PATCH 11/14] [build-docker-sonic-vs] Allowing partial build for sairedis (#3177) Allowing partial build for sairedis to make sure build isn't skipping latest on branch. 
--- .azure-pipelines/build-docker-sonic-vs-template.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index e276bd332d..2610fcb837 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -90,6 +90,7 @@ jobs: artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + allowPartiallySucceededBuilds: true path: $(Build.ArtifactStagingDirectory)/download/sairedis patterns: | ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb From b3ebfc463db7b325021d6e72d53e06a2edb73a70 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic <26731235+Ndancejic@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:35:32 -0700 Subject: [PATCH 12/14] [muxorch] Using bulker to program routes/neighbors during switchover (#3148) * [muxorch] Using bulker to program routes/neighbors during switchover Uses entity bulker to program routes and neighbors during mux switchover. Mux switchover performance suffers when switching over with a large number of neighbors on the mux port. This uses the optimization of programming the neighbors and routes in bulk to avoid sequentially programming each. What I did Changed mux switchover logic to use neighbor and bulk switchover instead of programming neighbors sequentially. Why I did it Testing shows this improves switchover time by an average of 30% at 128 neighbors. How I verified it added a test to vstests to test functionality of code, and tested performance on a dualtor lab testbed. --- orchagent/muxorch.cpp | 189 +++++++++- orchagent/muxorch.h | 27 +- orchagent/neighorch.cpp | 342 +++++++++++++++++- orchagent/neighorch.h | 32 +- orchagent/p4orch/tests/mock_sai_neighbor.h | 18 + .../p4orch/tests/neighbor_manager_test.cpp | 2 + tests/mock_tests/aclorch_ut.cpp | 3 + tests/mock_tests/bulker_ut.cpp | 31 ++ tests/mock_tests/mock_sai_api.h | 54 ++- tests/mock_tests/mux_rollback_ut.cpp | 60 ++- tests/test_mux.py | 60 +++ 11 files changed, 760 insertions(+), 58 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index ea3ade347c..ce6bf2baf2 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -744,6 +744,8 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -751,13 +753,21 @@ bool MuxNbrHandler::enable(bool update_rt) SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str()); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->enableNeighbor(neigh)) - { - SWSS_LOG_INFO("Enabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); + it++; + } + + if (!gNeighOrch->enableNeighbors(neigh_ctx_list)) + { + return false; + } + it = neighbors_.begin(); + while (it != neighbors_.end()) + { /* Update NH to point to learned neighbor */ + neigh = NeighborEntry(it->first, alias_); it->second = gNeighOrch->getLocalNextHopId(neigh); /* Reprogram route */ @@ -795,22 +805,26 @@ bool MuxNbrHandler::enable(bool update_rt) IpPrefix pfx = it->first.to_string(); if (update_rt) { - if (remove_route(pfx) != SAI_STATUS_SUCCESS) 
- { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx)); updateTunnelRoute(nh_key, false); } it++; } + if (update_rt && !removeRoutes(route_ctx_list)) + { + return false; + } + return true; } bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -852,21 +866,25 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); - if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) - { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second)); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->disableNeighbor(neigh)) - { - SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); it++; } + if (!addRoutes(route_ctx_list)) + { + return false; + } + + if (!gNeighOrch->disableNeighbors(neigh_ctx_list)) + { + return false; + } + return true; } @@ -881,6 +899,141 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } +bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Adding route entry %s, nh %" PRIx64 " to bulker", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); + + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = ctx->nh; + attrs.push_back(attr); + + status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_INFO("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", + ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + gRouteBulker.clear(); + return ret; +} + +bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = 
bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Removing route entry %s, nh %" PRIx64 "", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_INFO("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", + ctx->pfx.getIp().to_string().c_str(), status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + gRouteBulker.clear(); + return ret; +} + void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) { MuxOrch* mux_orch = gDirectory.get(); diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 22f01ce27d..3a6d165db4 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -10,6 +10,7 @@ #include "tunneldecaporch.h" #include "aclorch.h" #include "neighorch.h" +#include "bulker.h" enum MuxState { @@ -35,6 +36,26 @@ enum MuxCableType ACTIVE_ACTIVE }; +struct MuxRouteBulkContext +{ + std::deque object_statuses; // Bulk statuses + IpPrefix pfx; // Route prefix + sai_object_id_t nh; // nexthop id + + MuxRouteBulkContext(IpPrefix pfx) + : pfx(pfx) + { + } + + MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh) + : pfx(pfx), nh(nh) + { + } +}; + +extern size_t gMaxBulkSize; +extern sai_route_api_t* sai_route_api; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -64,7 +85,7 @@ typedef std::map MuxNeighbor; class MuxNbrHandler { public: - MuxNbrHandler() = default; + MuxNbrHandler() : gRouteBulker(sai_route_api, gMaxBulkSize) {}; bool enable(bool update_rt); bool disable(sai_object_id_t); @@ -75,11 +96,15 @@ class MuxNbrHandler string getAlias() const { return alias_; }; private: + bool removeRoutes(std::list& bulk_ctx_list); + bool addRoutes(std::list& bulk_ctx_list); + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; string alias_; + EntityBulker gRouteBulker; }; // Mux Cable object diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 006f456a1c..df96405791 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -22,10 +22,12 @@ extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern BfdOrch *gBfdOrch; +extern size_t gMaxBulkSize; const int neighorch_pri = 30; NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, FdbOrch 
*fdbOrch, PortsOrch *portsOrch, DBConnector *chassisAppDb) : + gNeighBulker(sai_neighbor_api, gMaxBulkSize), Orch(appDb, tableName, neighorch_pri), m_intfsOrch(intfsOrch), m_fdbOrch(fdbOrch), @@ -793,6 +795,8 @@ void NeighOrch::doTask(Consumer &consumer) NeighborEntry neighbor_entry = { ip_address, alias }; + NeighborContext ctx = NeighborContext(neighbor_entry); + if (op == SET_COMMAND) { Port p; @@ -818,6 +822,8 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } + ctx.mac = mac_address; + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { @@ -846,7 +852,7 @@ void NeighOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } - else if (addNeighbor(neighbor_entry, mac_address)) + else if (addNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -877,7 +883,7 @@ void NeighOrch::doTask(Consumer &consumer) { if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { - if (removeNeighbor(neighbor_entry)) + if (removeNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -898,13 +904,18 @@ void NeighOrch::doTask(Consumer &consumer) } } -bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress &macAddress) +bool NeighOrch::addNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; IpAddress ip_address = neighborEntry.ip_address; string alias = neighborEntry.alias; + bool bulk_op = ctx.bulk_op; sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -973,7 +984,8 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress SWSS_LOG_NOTICE("Neighbor %s already learned on %s in VRF %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str(), vrf_name.c_str()); } - if (!removeNeighbor(temp_entry)) + NeighborContext removeContext = NeighborContext(temp_entry); + if (!removeNeighbor(removeContext)) { SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); return false; @@ -995,6 +1007,15 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) { + // Using bulker, return and post-process later + if (bulk_op) + { + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + object_statuses.emplace_back(); + gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); + return true; + } + status = sai_neighbor_api->create_neighbor_entry(&neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -1091,13 +1112,17 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress return true; } -bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) +bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) { SWSS_LOG_ENTER(); sai_status_t status; - IpAddress ip_address = neighborEntry.ip_address; + auto& object_statuses = ctx.object_statuses; + + const NeighborEntry neighborEntry = ctx.neighborEntry; string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; 
+ bool bulk_op = ctx.bulk_op; NextHopKey nexthop = { ip_address, alias }; if(m_intfsOrch->isRemoteSystemPortIntf(alias)) @@ -1168,6 +1193,13 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) SWSS_LOG_NOTICE("Removed next hop %s on %s", ip_address.to_string().c_str(), alias.c_str()); + if (bulk_op) + { + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); + return true; + } + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); if (status != SAI_STATUS_SUCCESS) { @@ -1227,6 +1259,185 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) return true; } +/* Process bulk ctx entry and enable the neigbor */ +bool NeighOrch::processBulkEnableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (!ctx.bulk_op) + { + SWSS_LOG_INFO("Not a bulk entry for %s on %s", ip_address.to_string().c_str(), alias.c_str()); + return true; + } + + SWSS_LOG_INFO("Checking neighbor create entry status %s on %s.", ip_address.to_string().c_str(), alias.c_str()); + + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + if (rif_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); + return false; + } + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + MuxOrch* mux_orch = gDirectory.get(); + if (mux_orch->isNeighborActive(ip_address, macAddress, alias)) + { + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_INFO("Neighbor exists: neighbor %s on %s, skipping: status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + return true; + } + else + { + SWSS_LOG_ERROR("Failed to create neighbor %s on %s, status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + SWSS_LOG_NOTICE("Created neighbor ip %s, %s on %s", ip_address.to_string().c_str(), + macAddress.to_string().c_str(), alias.c_str()); + + m_intfsOrch->increaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + if (!addNextHop(NextHopKey(ip_address, alias))) + { + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + 
gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + return false; + } + } + + m_syncdNeighbors[neighborEntry] = { macAddress, true }; + + NeighborUpdate update = { neighborEntry, macAddress, true }; + notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + return true; +} + +/* Process bulk ctx entry and disable the neigbor */ +bool NeighOrch::processBulkDisableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + return true; + } + + SWSS_LOG_INFO("Checking neighbor remove entry status %s on %s.", ip_address.to_string().c_str(), m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); + + if (isHwConfigured(neighborEntry)) + { + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Bulk remove entry skipped, neighbor %s on %s already removed, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + } + else + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + else + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } + } + + /* Do not delete entry from cache for disable request */ + m_syncdNeighbors[neighborEntry].hw_configured = false; + return true; +} + bool NeighOrch::isHwConfigured(const NeighborEntry& neighborEntry) { if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) @@ -1253,7 +1464,11 @@ bool NeighOrch::enableNeighbor(const NeighborEntry& neighborEntry) return true; } - return addNeighbor(neighborEntry, m_syncdNeighbors[neighborEntry].mac); + NeighborEntry neigh = neighborEntry; + NeighborContext ctx = NeighborContext(neigh); + ctx.mac = m_syncdNeighbors[neighborEntry].mac; + + return addNeighbor(ctx); } bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) @@ -1272,7 +1487,108 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return true; } - return removeNeighbor(neighborEntry, true); + NeighborContext ctx = NeighborContext(neighborEntry); + + return removeNeighbor(ctx, true); +} + +/* enable neighbors using bulker */ +bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != 
bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + if (isHwConfigured(neighborEntry)) + { + SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!addNeighbor(*ctx)) + { + SWSS_LOG_ERROR("Neighbor %s create entry failed.", neighborEntry.ip_address.to_string().c_str()); + continue; + } + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkEnableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; +} + +/* disable neighbors using bulker */ +bool NeighOrch::disableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!removeNeighbor(*ctx, true)) + { + SWSS_LOG_ERROR("Neighbor %s remove entry failed.", neighborEntry.ip_address.to_string().c_str()); + } + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkDisableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Disable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries but return false */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; } sai_object_id_t NeighOrch::addTunnelNextHop(const NextHopKey& nh) @@ -1454,7 +1770,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_NOTICE("VOQ encap index set failed for neighbor %s. Removing and re-adding", kfvKey(t).c_str()); //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -1485,7 +1802,9 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) } //Add neigh to SAI - if (addNeighbor(neighbor_entry, mac_address)) + NeighborContext ctx = NeighborContext(neighbor_entry); + ctx.mac = mac_address; + if (addNeighbor(ctx)) { //neigh successfully added to SAI. 
Set STATE DB to signal kernel programming by neighbor manager @@ -1538,7 +1857,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index f44741fa37..0b59181db1 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -12,6 +12,7 @@ #include "producerstatetable.h" #include "schema.h" #include "bfdorch.h" +#include "bulker.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -43,6 +44,27 @@ struct NeighborUpdate bool add; }; +/* + * Keeps track of neighbor entry information primarily for bulk operations + */ +struct NeighborContext +{ + NeighborEntry neighborEntry; // neighbor entry to process + std::deque object_statuses; // bulk statuses + MacAddress mac; // neighbor mac + bool bulk_op = false; // use bulker (only for mux use for now) + + NeighborContext(NeighborEntry neighborEntry) + : neighborEntry(neighborEntry) + { + } + + NeighborContext(NeighborEntry neighborEntry, bool bulk_op) + : neighborEntry(neighborEntry), bulk_op(bulk_op) + { + } +}; + class NeighOrch : public Orch, public Subject, public Observer { public: @@ -66,6 +88,8 @@ class NeighOrch : public Orch, public Subject, public Observer bool enableNeighbor(const NeighborEntry&); bool disableNeighbor(const NeighborEntry&); + bool enableNeighbors(std::list&); + bool disableNeighbors(std::list&); bool isHwConfigured(const NeighborEntry&); sai_object_id_t addTunnelNextHop(const NextHopKey&); @@ -95,10 +119,14 @@ class NeighOrch : public Orch, public Subject, public Observer std::set m_neighborToResolve; + EntityBulker gNeighBulker; + bool removeNextHop(const IpAddress&, const string&); - bool addNeighbor(const NeighborEntry&, const MacAddress&); - bool removeNeighbor(const NeighborEntry&, bool disable = false); + bool addNeighbor(NeighborContext& ctx); + bool removeNeighbor(NeighborContext& ctx, bool disable = false); + bool processBulkEnableNeighbor(NeighborContext& ctx); + bool processBulkDisableNeighbor(NeighborContext& ctx); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/orchagent/p4orch/tests/mock_sai_neighbor.h b/orchagent/p4orch/tests/mock_sai_neighbor.h index cd8f2aa0a9..4355831d36 100644 --- a/orchagent/p4orch/tests/mock_sai_neighbor.h +++ b/orchagent/p4orch/tests/mock_sai_neighbor.h @@ -16,6 +16,12 @@ class MockSaiNeighbor MOCK_METHOD1(remove_neighbor_entry, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry)); + MOCK_METHOD6(create_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses)); + MOCK_METHOD2(set_neighbor_entry_attribute, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr)); @@ -37,6 +43,18 @@ sai_status_t mock_remove_neighbor_entry(_In_ 
const sai_neighbor_entry_t *neighbo return mock_sai_neighbor->remove_neighbor_entry(neighbor_entry); } +sai_status_t mock_create_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->create_neighbor_entries(object_count, neighbor_entry, attr_count, attr_list, mode, object_statuses); +} + +sai_status_t mock_remove_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->remove_neighbor_entries(object_count, neighbor_entry, mode, object_statuses); +} + sai_status_t mock_set_neighbor_entry_attribute(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr) { diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index 4db1db873e..7523701cb7 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -124,6 +124,8 @@ class NeighborManagerTest : public ::testing::Test mock_sai_neighbor = &mock_sai_neighbor_; sai_neighbor_api->create_neighbor_entry = mock_create_neighbor_entry; sai_neighbor_api->remove_neighbor_entry = mock_remove_neighbor_entry; + sai_neighbor_api->create_neighbor_entries = mock_create_neighbor_entries; + sai_neighbor_api->remove_neighbor_entries = mock_remove_neighbor_entries; sai_neighbor_api->set_neighbor_entry_attribute = mock_set_neighbor_entry_attribute; sai_neighbor_api->get_neighbor_entry_attribute = mock_get_neighbor_entry_attribute; } diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 8005199935..4a92d65c80 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -24,6 +24,7 @@ extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; extern sai_route_api_t *sai_route_api; +extern sai_route_api_t *sai_neighbor_api; extern sai_mpls_api_t *sai_mpls_api; extern sai_next_hop_group_api_t* sai_next_hop_group_api; extern string gMySwitchType; @@ -318,6 +319,7 @@ namespace aclorch_test sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); sai_api_query(SAI_API_ROUTE, (void **)&sai_route_api); + sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); sai_api_query(SAI_API_MPLS, (void **)&sai_mpls_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); @@ -490,6 +492,7 @@ namespace aclorch_test sai_vlan_api = nullptr; sai_bridge_api = nullptr; sai_route_api = nullptr; + sai_neighbor_api = nullptr; sai_mpls_api = nullptr; } diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index 6210cc0969..dc5ad78776 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -2,6 +2,7 @@ #include "bulker.h" extern sai_route_api_t *sai_route_api; +extern sai_neighbor_api_t *sai_neighbor_api; namespace bulker_test { @@ -17,12 +18,18 @@ namespace bulker_test { ASSERT_EQ(sai_route_api, nullptr); sai_route_api = new sai_route_api_t(); + + ASSERT_EQ(sai_neighbor_api, nullptr); + sai_neighbor_api = new sai_neighbor_api_t(); } void TearDown() override { delete sai_route_api; sai_route_api = nullptr; + + delete 
sai_neighbor_api; + sai_neighbor_api = nullptr; } }; @@ -142,4 +149,28 @@ namespace bulker_test // Confirm route entry is not pending removal ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); } + + TEST_F(BulkerTest, NeighborBulker) + { + // Create bulker + EntityBulker gNeighBulker(sai_neighbor_api, 1000); + deque object_statuses; + + // Check max bulk size + ASSERT_EQ(gNeighBulker.max_bulk_size, 1000); + + // Create a dummy neighbor entry + sai_neighbor_entry_t neighbor_entry_remove; + neighbor_entry_remove.ip_address.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + neighbor_entry_remove.ip_address.addr.ip4 = 0x10000001; + neighbor_entry_remove.rif_id = 0x0; + neighbor_entry_remove.switch_id = 0x0; + + // Put neighbor entry into remove + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry_remove); + + // Confirm neighbor entry is pending removal + ASSERT_TRUE(gNeighBulker.bulk_entry_pending_removal(neighbor_entry_remove)); + } } diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h index 7819b5b126..58e3c9da23 100644 --- a/tests/mock_tests/mock_sai_api.h +++ b/tests/mock_tests/mock_sai_api.h @@ -24,8 +24,12 @@ EXTERN_MOCK_FNS #define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry +#define CREATE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses +#define REMOVE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ sai_bulk_op_error_mode_t mode, _In_ sai_status_t *object_statuses #define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list #define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define CREATE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, attr_count, attr_list, mode, object_statuses +#define REMOVE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, mode, object_statuses #define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id #define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list @@ -42,8 +46,8 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the 7. 
Define a method to remove the mock */ #define DEFINE_SAI_API_MOCK(sai_object_type) \ - static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ - static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ class mock_sai_##sai_object_type##_api_t \ { \ public: \ @@ -59,20 +63,40 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the [this](REMOVE_PARAMS(sai_object_type)) { \ return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ }); \ + ON_CALL(*this, create_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](CREATE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](REMOVE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + }); \ } \ MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ + MOCK_METHOD6(create_##sai_object_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_object_type))); \ + MOCK_METHOD4(remove_##sai_object_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ - inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ + inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_object_type##_api_mock() \ + inline sai_status_t mock_create_##sai_object_type##_entries(CREATE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##_entries(REMOVE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_object_type##_api_mock() \ { \ mock_sai_##sai_object_type##_api = new NiceMock(); \ \ @@ -82,16 +106,18 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the \ sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ + sai_##sai_object_type##_api->create_##sai_object_type##_entries = mock_create_##sai_object_type##_entries; \ + sai_##sai_object_type##_api->remove_##sai_object_type##_entries = mock_remove_##sai_object_type##_entries; \ } \ - inline void 
remove_sai_##sai_object_type##_api_mock() \ + inline void remove_sai_##sai_object_type##_api_mock() \ { \ sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ delete mock_sai_##sai_object_type##_api; \ } #define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ - static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ - static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ class mock_sai_##sai_api_name##_api_t \ { \ public: \ @@ -111,16 +137,16 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ - inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_api_name##_api_mock() \ + inline void apply_sai_##sai_api_name##_api_mock() \ { \ mock_sai_##sai_api_name##_api = new NiceMock(); \ \ @@ -131,7 +157,7 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ } \ - inline void remove_sai_##sai_api_name##_api_mock() \ + inline void remove_sai_##sai_api_name##_api_mock() \ { \ sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ delete mock_sai_##sai_api_name##_api; \ diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index 008b0bd9b5..52aa29d24e 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -5,6 +5,10 @@ #include "orch.h" #undef protected #include "ut_helper.h" +#define private public +#include "neighorch.h" +#include "muxorch.h" +#undef private #include "mock_orchagent_main.h" #include "mock_sai_api.h" #include "mock_orch_test.h" @@ -19,13 +23,21 @@ namespace mux_rollback_test DEFINE_SAI_API_MOCK(route); DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + using ::testing::_; using namespace std; using namespace mock_orch_test; using ::testing::Return; using ::testing::Throw; + using ::testing::DoAll; + using ::testing::SetArrayArgument; static const string TEST_INTERFACE = "Ethernet4"; + sai_bulk_create_neighbor_entry_fn old_create_neighbor_entries; + sai_bulk_remove_neighbor_entry_fn old_remove_neighbor_entries; + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + class MuxRollbackTest : public MockOrchTest { protected: @@ -131,41 +143,57 @@ namespace mux_rollback_test 
INIT_SAI_API_MOCK(acl); INIT_SAI_API_MOCK(next_hop); MockSaiApis(); + old_create_neighbor_entries = gNeighOrch->gNeighBulker.create_entries; + old_remove_neighbor_entries = gNeighOrch->gNeighBulker.remove_entries; + old_create_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.create_entries; + old_remove_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.remove_entries; + gNeighOrch->gNeighBulker.create_entries = mock_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = mock_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = mock_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = mock_remove_route_entries; } void PreTearDown() override { RestoreSaiApis(); + gNeighOrch->gNeighBulker.create_entries = old_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = old_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = old_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = old_remove_route_entries; } }; TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(STANDBY_STATE); } @@ -201,7 +229,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) .WillOnce(Throw(runtime_error("Mock runtime error"))); SetMuxStateFromAppDb(ACTIVE_STATE); EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); @@ -210,7 +238,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) + EXPECT_CALL(*mock_sai_route_api, create_route_entries) 
.WillOnce(Throw(runtime_error("Mock runtime error"))); SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); @@ -218,7 +246,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); SetMuxStateFromAppDb(ACTIVE_STATE); EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); @@ -227,7 +255,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); @@ -249,4 +277,12 @@ namespace mux_rollback_test SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopTableFullRollbackToActive) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } } diff --git a/tests/test_mux.py b/tests/test_mux.py index 9405312a5a..fce1b4f37c 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -101,6 +101,8 @@ class TestMuxTunnelBase(): DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + BULK_NEIGHBOR_COUNT = 254 + def check_syslog(self, dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) @@ -337,8 +339,66 @@ def del_route(self, dvs, route): ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) ps._del(route) + def wait_for_mux_state(self, dvs, interface, expected_state): + """ + Waits until state change completes - expected state is in state_db + """ + + apdb = dvs.get_app_db() + expected_field = {"state": expected_state} + apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field) + + def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route): + dvs.runcmd("ip neigh flush all") + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11") + self.set_mux_state(appdb, "Ethernet0", "active") + + class neighbor_info: + ipv4_key = "" + ipv6_key = "" + ipv4 = "" + ipv6 = "" + + def __init__(self, i): + self.ipv4 = "192.168.1." 
+ str(i) + self.ipv6 = "fc02:1001::" + str(i) + + neighbor_list = [neighbor_info(i) for i in range(100, self.BULK_NEIGHBOR_COUNT)] + for neigh_info in neighbor_list: + self.add_neighbor(dvs, neigh_info.ipv4, "00:00:00:00:11:11") + self.add_neighbor(dvs, neigh_info.ipv6, "00:00:00:00:11:11") + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + try: + self.set_mux_state(appdb, "Ethernet0", "standby") + self.wait_for_mux_state(dvs, "Ethernet0", "standby") + + for neigh_info in neighbor_list: + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv4_key) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv6_key) + dvs_route.check_asicdb_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + + self.set_mux_state(appdb, "Ethernet0", "active") + self.wait_for_mux_state(dvs, "Ethernet0", "active") + + for neigh_info in neighbor_list: + dvs_route.check_asicdb_deleted_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + finally: + for neigh_info in neighbor_list: + self.del_neighbor(dvs, neigh_info.ipv4) + self.del_neighbor(dvs, neigh_info.ipv6) + def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): + self.bulk_neighbor_test(confdb, appdb, asicdb, dvs, dvs_route) self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "standby") From 80f52079319df45a650cb54502fa4b70adf4e5ff Mon Sep 17 00:00:00 2001 From: Roy Yi <126022672+royyi8@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:36:03 -0700 Subject: [PATCH 13/14] Add SWSS support for link event damping feature (#2933) What I did Added support for link event damping in SWSS. Required Syncd PR: sonic-net/sonic-sairedis#1297 CLI PR: sonic-net/sonic-utilities#3001 HLD: https://github.com/sonic-net/SONiC/blob/master/doc/link_event_damping/Link-event-damping-HLD.md Why I did it How I verified it Use the config interface damping CLI to set the port attributes on the switch and observe that Syncd processes link event damping parameters. 
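For context, the damping parameters introduced below (flap_penalty, decay_half_life, suppress_threshold, reuse_threshold, max_suppress_time) follow the usual AIED (additive increase, exponential decrease) flap-damping model described in the HLD. The following minimal, self-contained C++ sketch only illustrates how such parameters typically interact; it is not the syncd implementation, it assumes standard AIED behavior, and every name in it is hypothetical.

#include <cmath>
#include <iostream>

// Hypothetical illustration of AIED-style link event damping (not the syncd code).
struct DampingState
{
    double penalty = 0.0;        // accumulated flap penalty
    double suppressedFor = 0.0;  // seconds spent in the suppressed state
    bool   suppressed = false;
};

// Each link flap adds flap_penalty; once the penalty exceeds suppress_threshold,
// link state changes stop being advertised.
void onFlap(DampingState &s, double flapPenalty, double suppressThreshold)
{
    s.penalty += flapPenalty;
    if (s.penalty > suppressThreshold)
    {
        s.suppressed = true;
    }
}

// The penalty decays exponentially with decay_half_life; the link is advertised
// again once the penalty drops below reuse_threshold or max_suppress_time expires.
void onTick(DampingState &s, double elapsedSec, double halfLife,
            double reuseThreshold, double maxSuppressTime)
{
    s.penalty *= std::pow(0.5, elapsedSec / halfLife);
    if (s.suppressed)
    {
        s.suppressedFor += elapsedSec;
        if (s.penalty < reuseThreshold || s.suppressedFor >= maxSuppressTime)
        {
            s.suppressed = false;
            s.suppressedFor = 0.0;
        }
    }
}

int main()
{
    DampingState s;
    onFlap(s, 1000, 1600);                  // first flap: below suppress threshold
    onFlap(s, 1000, 1600);                  // second flap: suppression kicks in
    std::cout << s.suppressed << "\n";      // prints 1
    onTick(s, 60, 30, 750, 120);            // two half-lives: 2000 -> 500 < 750
    std::cout << s.suppressed << "\n";      // prints 0
    return 0;
}

In this sketch the thresholds and penalties are arbitrary example values; the actual numbers are taken from the per-port configuration fields that the diff below parses.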
--- orchagent/port.h | 9 + orchagent/port/portcnt.h | 34 +++ orchagent/port/porthlpr.cpp | 108 ++++++++++ orchagent/port/porthlpr.h | 5 + orchagent/port/portschema.h | 6 + orchagent/portsorch.cpp | 123 +++++++++++ orchagent/portsorch.h | 5 + tests/mock_tests/portsorch_ut.cpp | 334 ++++++++++++++++++++++++++++++ tests/test_port.py | 48 +++++ 9 files changed, 672 insertions(+) diff --git a/orchagent/port.h b/orchagent/port.h index d153b20318..0ae9b97b67 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -13,6 +13,7 @@ extern "C" { #include #include +#include #define DEFAULT_PORT_VLAN_ID 1 /* @@ -212,6 +213,14 @@ class Port /* Path Tracing */ uint16_t m_pt_intf_id = 0; sai_port_path_tracing_timestamp_type_t m_pt_timestamp_template = SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23; + + /* link event damping */ + sai_redis_link_event_damping_algorithm_t m_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + uint32_t m_max_suppress_time = 0; + uint32_t m_decay_half_life = 0; + uint32_t m_suppress_threshold = 0; + uint32_t m_reuse_threshold = 0; + uint32_t m_flap_penalty = 0; }; } diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h index 9e3e63f9b7..33d52231cc 100644 --- a/orchagent/port/portcnt.h +++ b/orchagent/port/portcnt.h @@ -217,6 +217,40 @@ class PortConfig final bool is_set = false; } pt_timestamp_template; // Port timestamp template for Path Tracing + struct { + sai_redis_link_event_damping_algorithm_t value; + bool is_set = false; + } link_event_damping_algorithm; // Port link event damping algorithm + + struct { + + struct { + uint32_t value; + bool is_set = false; + } max_suppress_time; // Max suppress time + + struct { + uint32_t value; + bool is_set = false; + } decay_half_life; // Decay half life + + struct { + uint32_t value; + bool is_set = false; + } suppress_threshold; // Suppress threshold + + struct { + uint32_t value; + bool is_set = false; + } reuse_threshold; // Reuse threshold + + struct { + uint32_t value; + bool is_set = false; + } flap_penalty; // Flap penalty + + } link_event_damping_config; // Port link event damping config + std::string key; std::string op; diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp index 7ac9c15c52..181fef9f69 100644 --- a/orchagent/port/porthlpr.cpp +++ b/orchagent/port/porthlpr.cpp @@ -21,6 +21,7 @@ using namespace swss; // types -------------------------------------------------------------------------------------------------------------- typedef decltype(PortConfig::serdes) PortSerdes_t; +typedef decltype(PortConfig::link_event_damping_config) PortDampingConfig_t; // constants ---------------------------------------------------------------------------------------------------------- @@ -126,6 +127,12 @@ static const std::unordered_map g_linkEventDampingAlgorithmMap = +{ + { "disabled", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED }, + { "aied", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED } +}; + // functions ---------------------------------------------------------------------------------------------------------- template @@ -246,6 +253,11 @@ std::string PortHelper::getPtTimestampTemplateStr(const PortConfig &port) const return this->getFieldValueStr(port, PORT_PT_TIMESTAMP_TEMPLATE); } +std::string PortHelper::getDampingAlgorithm(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_DAMPING_ALGO); +} + bool PortHelper::parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -786,6 +798,60 @@ 
bool PortHelper::parsePortSubport(PortConfig &port, const std::string &field, co return true; } +bool PortHelper::parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = g_linkEventDampingAlgorithmMap.find(value); + if (cit == g_linkEventDampingAlgorithmMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.link_event_damping_algorithm.value = cit->second; + port.link_event_damping_algorithm.is_set = true; + + return true; +} + +template +bool PortHelper::parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + try + { + damping_config_attr.value = to_uint(value); + damping_config_attr.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::max_suppress_time) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::decay_half_life) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::suppress_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::reuse_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::flap_penalty) &damping_config_attr, const std::string &field, const std::string &value) const; + bool PortHelper::parsePortPtIntfId(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -1121,6 +1187,48 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_DAMPING_ALGO) + { + if (!this->parsePortLinkEventDampingAlgorithm(port, field, value)) + { + return false; + } + } + else if (field == PORT_MAX_SUPPRESS_TIME) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.max_suppress_time, field, value)) + { + return false; + } + } + else if (field == PORT_DECAY_HALF_LIFE) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.decay_half_life, field, value)) + { + return false; + } + } + else if (field == PORT_SUPPRESS_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.suppress_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_REUSE_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.reuse_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_FLAP_PENALTY) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.flap_penalty, field, value)) + { + return false; + } + } else { 
SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); diff --git a/orchagent/port/porthlpr.h b/orchagent/port/porthlpr.h index 3852759975..45a4893a39 100644 --- a/orchagent/port/porthlpr.h +++ b/orchagent/port/porthlpr.h @@ -27,6 +27,7 @@ class PortHelper final std::string getLinkTrainingStr(const PortConfig &port) const; std::string getAdminStatusStr(const PortConfig &port) const; std::string getPtTimestampTemplateStr(const PortConfig &port) const; + std::string getDampingAlgorithm(const PortConfig &port) const; bool parsePortConfig(PortConfig &port) const; bool validatePortConfig(PortConfig &port) const; @@ -37,6 +38,10 @@ class PortHelper final template bool parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const; + bool parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const; + template + bool parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const; + bool parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const; diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h index c9a3274913..8dd7f79200 100644 --- a/orchagent/port/portschema.h +++ b/orchagent/port/portschema.h @@ -95,3 +95,9 @@ #define PORT_SUBPORT "subport" #define PORT_PT_INTF_ID "pt_interface_id" #define PORT_PT_TIMESTAMP_TEMPLATE "pt_timestamp_template" +#define PORT_DAMPING_ALGO "link_event_damping_algorithm" +#define PORT_MAX_SUPPRESS_TIME "max_suppress_time" +#define PORT_DECAY_HALF_LIFE "decay_half_life" +#define PORT_SUPPRESS_THRESHOLD "suppress_threshold" +#define PORT_REUSE_THRESHOLD "reuse_threshold" +#define PORT_FLAP_PENALTY "flap_penalty" diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index f370d09607..ebce79093c 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -3171,6 +3171,49 @@ task_process_status PortsOrch::setPortLinkTraining(const Port &port, bool state) return task_success; } +ReturnCode PortsOrch::setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM; + attr.value.s32 = link_event_damping_algorithm; + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), + "Failed to set link event damping algorithm (" << link_event_damping_algorithm << ") for port " + << port.m_alias); + + SWSS_LOG_INFO("Set link event damping algorithm %u for port %s", link_event_damping_algorithm, port.m_alias.c_str()); + return ReturnCode(); +} + +ReturnCode PortsOrch::setPortLinkEventDampingAiedConfig(Port &port, + sai_redis_link_event_damping_algo_aied_config_t &config) { + + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG; + attr.value.ptr = (void *) &config; + + std::stringstream msg; + msg << "link event damping algorithm aied config for port " << port.m_alias << " - "; + msg << "max_suppress_time: " << config.max_suppress_time << ", "; + msg << "decay_half_life: " << config.decay_half_life << ", "; + msg << "suppress_threshold: " << config.suppress_threshold << ", "; + msg << "reuse_threshold: " << config.reuse_threshold << ", "; + 
msg << "flap_penalty: " << config.flap_penalty; + + std::string msg_str = msg.str(); + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), "Failed to set " + msg_str); + + SWSS_LOG_INFO("Set %s", msg_str.c_str()); + + return ReturnCode(); +} + bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const { SWSS_LOG_ENTER(); @@ -4033,6 +4076,86 @@ void PortsOrch::doPortTask(Consumer &consumer) } } + if (pCfg.link_event_damping_algorithm.is_set) + { + if (p.m_link_event_damping_algorithm != pCfg.link_event_damping_algorithm.value) + { + auto status = setPortLinkEventDampingAlgorithm(p, pCfg.link_event_damping_algorithm.value); + if (!status.ok()) + { + SWSS_LOG_ERROR( + "Failed to set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + it = taskMap.erase(it); + continue; + } + + p.m_link_event_damping_algorithm = pCfg.link_event_damping_algorithm.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + } + } + + sai_redis_link_event_damping_algo_aied_config_t aied_config = { + p.m_max_suppress_time, + p.m_suppress_threshold, + p.m_reuse_threshold, + p.m_decay_half_life, + p.m_flap_penalty, + }; + + if (pCfg.link_event_damping_config.max_suppress_time.is_set) + { + aied_config.max_suppress_time = pCfg.link_event_damping_config.max_suppress_time.value; + } + if (pCfg.link_event_damping_config.decay_half_life.is_set) + { + aied_config.decay_half_life = pCfg.link_event_damping_config.decay_half_life.value; + } + if (pCfg.link_event_damping_config.suppress_threshold.is_set) + { + aied_config.suppress_threshold = pCfg.link_event_damping_config.suppress_threshold.value; + } + if (pCfg.link_event_damping_config.reuse_threshold.is_set) + { + aied_config.reuse_threshold = pCfg.link_event_damping_config.reuse_threshold.value; + } + if (pCfg.link_event_damping_config.flap_penalty.is_set) + { + aied_config.flap_penalty = pCfg.link_event_damping_config.flap_penalty.value; + } + + bool config_changed = !(aied_config.max_suppress_time == p.m_max_suppress_time && + aied_config.decay_half_life == p.m_decay_half_life && + aied_config.suppress_threshold == p.m_suppress_threshold && + aied_config.reuse_threshold == p.m_reuse_threshold && + aied_config.flap_penalty == p.m_flap_penalty); + + if (config_changed) + { + auto status = setPortLinkEventDampingAiedConfig(p, aied_config); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to set port %s link event damping config", p.m_alias.c_str()); + it = taskMap.erase(it); + continue; + } + + p.m_max_suppress_time = aied_config.max_suppress_time; + p.m_decay_half_life = aied_config.decay_half_life; + p.m_suppress_threshold = aied_config.suppress_threshold; + p.m_reuse_threshold = aied_config.reuse_threshold; + p.m_flap_penalty = aied_config.flap_penalty; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE("Set port %s link event damping config successfully", p.m_alias.c_str()); + } + if (pCfg.speed.is_set) { if (p.m_speed != pCfg.speed.value) diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 70f6248fda..3ae283fb80 100644 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -462,6 +462,11 @@ class PortsOrch : public Orch, public Subject task_process_status setPortAdvInterfaceTypes(Port &port, std::set &interface_types); task_process_status setPortLinkTraining(const Port& port, bool state); + ReturnCode 
setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm); + ReturnCode setPortLinkEventDampingAiedConfig(Port &port, + sai_redis_link_event_damping_algo_aied_config_t &config); + void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index afa26dc439..22f8632af1 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -92,6 +92,12 @@ namespace portsorch_test uint32_t set_pt_interface_id_failures; uint32_t set_pt_timestamp_template_failures; uint32_t set_port_tam_failures; + bool set_link_event_damping_success = true; + uint32_t _sai_set_link_event_damping_algorithm_count; + uint32_t _sai_set_link_event_damping_config_count; + int32_t _sai_link_event_damping_algorithm = 0; + sai_redis_link_event_damping_algo_aied_config_t _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + sai_status_t _ut_stub_sai_set_port_attribute( _In_ sai_object_id_t port_id, _In_ const sai_attribute_t *attr) @@ -148,6 +154,26 @@ namespace portsorch_test return SAI_STATUS_INVALID_ATTR_VALUE_0; } } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM) + { + _sai_set_link_event_damping_algorithm_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_algorithm = attr[0].value.s32; + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG) + { + _sai_set_link_event_damping_config_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_config = *(reinterpret_cast(attr[0].value.ptr)); + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } return pold_sai_port_api->set_port_attribute(port_id, attr); } @@ -1671,6 +1697,314 @@ namespace portsorch_test _unhook_sai_bridge_api(); } + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmSuccess) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // verify SAI call was made and set algorithm successfully + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmFailure) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + 
_sai_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that SAI call was made, algorithm not set + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, NotSupportedLinkEventDampingAlgorithm) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "test_algo"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that no SAI call was made + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingFullConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); 
+ static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 64000); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 45000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 1650); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1500); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 1000); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingPartialConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"decay_half_life", "30000"}, + {"reuse_threshold", "1200"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 30000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1200); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingConfigFailure) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that config is not set + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 0); + 
ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortSupportedFecModes) { _hook_sai_port_api(); diff --git a/tests/test_port.py b/tests/test_port.py index d7bf62d2d7..feccb6917a 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -432,6 +432,54 @@ def test_PortPathTracing(self, dvs, testlog): for key, queue in buffer_queues.items(): dvs.get_config_db().update_entry("BUFFER_QUEUE", key, queue) + def test_PortLinkEventDamping(self, dvs, testlog): + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + + cfg_tbl = swsscommon.Table(cdb, "PORT") + app_tbl = swsscommon.Table(pdb, "PORT_TABLE") + port_name = "Ethernet0" + + # Set link event damping. + fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "aied"), + ("max_suppress_time", "54000"), + ("decay_half_life", "45000"), + ("suppress_threshold", "1650"), + ("reuse_threshold", "1500"), + ("flap_penalty", "1000") + ]) + cfg_tbl.set(port_name, fvs) + time.sleep(1) + + # Check application database. + (status, fvs) = app_tbl.get(port_name) + assert status == True + for fv in fvs: + if fv[0] == "link_event_damping_algorithm": + assert fv[1] == "aied" + elif fv[0] == "max_suppress_time": + assert fv[1] == "54000" + elif fv[0] == "decay_half_life": + assert fv[1] == "45000" + elif fv[0] == "suppress_threshold": + assert fv[1] == "1650" + elif fv[0] == "reuse_threshold": + assert fv[1] == "1500" + elif fv[0] == "flap_penalty": + assert fv[1] == "1000" + + # Disable link event damping. + fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "disabled")]) + cfg_tbl.set(port_name, fvs) + time.sleep(1) + + # Check application database. + (status, fvs) = app_tbl.get(port_name) + assert status == True + for fv in fvs: + if fv[0] == "link_event_damping_algorithm": + assert fv[1] == "disabled" + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying From fff544e6b4a5f5a8b9cd901b50c26509e6e69389 Mon Sep 17 00:00:00 2001 From: mint570 <70396898+mint570@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:36:39 -0700 Subject: [PATCH 14/14] Rotate record file before writing new log. (#3158) What I did Rotate file before writing the log for record files. Why I did it If we configure logrotate to compress, the old file stream will write to void after the old file is rotated and compressed. This PR changes the order of write and rotate. It will always rotate first and then write. There might still be log lost if logrotate sends HUP signal too late. --- lib/recorder.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/recorder.cpp b/lib/recorder.cpp index 449039adff..e9af745cdf 100644 --- a/lib/recorder.cpp +++ b/lib/recorder.cpp @@ -93,12 +93,12 @@ void RecWriter::record(const std::string& val) { return ; } - record_ofs << swss::getTimestamp() << "|" << val << std::endl; if (isRotate()) { setRotate(false); logfileReopen(); } + record_ofs << swss::getTimestamp() << "|" << val << std::endl; }
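
The ordering matters here: once logrotate has renamed and compressed the old file, the still-open stream keeps writing to the stale inode, so those records are lost. Below is a minimal, self-contained sketch of the rotate-then-write sequence; it is illustrative only — MiniRecorder, requestRotate() and reopen() are stand-ins for RecWriter, its HUP-driven rotate flag and logfileReopen(), not the actual swss classes.

// Minimal sketch of the rotate-before-write ordering from the recorder fix.
// Assumptions: m_rotate stands in for RecWriter's rotate flag (set when
// logrotate signals the process) and reopen() plays the role of
// logfileReopen(). Illustrative only, not the swss implementation.
#include <atomic>
#include <fstream>
#include <string>

class MiniRecorder
{
public:
    explicit MiniRecorder(const std::string &path)
        : m_path(path), m_ofs(path, std::ios::app) {}

    void requestRotate() { m_rotate = true; }      // e.g. set from a HUP handler

    void record(const std::string &val)
    {
        if (m_rotate)                              // rotate FIRST ...
        {
            m_rotate = false;
            reopen();                              // ... so we never write into a renamed/compressed file
        }
        m_ofs << val << std::endl;                 // ... then write to the fresh file
    }

private:
    void reopen()
    {
        m_ofs.close();
        m_ofs.open(m_path, std::ios::app);         // re-creates the file logrotate moved away
    }

    std::string m_path;
    std::ofstream m_ofs;
    std::atomic<bool> m_rotate{false};
};

int main()
{
    MiniRecorder rec("swss.rec");
    rec.record("record written before rotation");
    rec.requestRotate();                           // simulate logrotate having run
    rec.record("record written after rotation, lands in the new file");
    return 0;
}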