diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index e276bd332d..2610fcb837 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -90,6 +90,7 @@ jobs: artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + allowPartiallySucceededBuilds: true path: $(Build.ArtifactStagingDirectory)/download/sairedis patterns: | ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index 9c7e84b208..0a680e35de 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -10,7 +10,7 @@ parameters: - name: pool type: string values: - - sonicbld + - sonicbld-1es - sonicbld-armhf - sonicbld-arm64 - default diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 9bce6feccd..9b13a85502 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -8,7 +8,7 @@ parameters: - name: pool type: string values: - - sonicbld + - sonicbld-1es - default default: default diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 9eca60f0b6..db66b03472 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -91,7 +91,8 @@ jobs: sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod sudo apt-get update sudo apt-get install -y dotnet-sdk-7.0 - sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin + sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log || grep 'already installed' log.log + rm log.log displayName: "Install .NET CORE" - script: | diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e3255ba15b..f345319c03 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -41,7 +41,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bullseye common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common @@ -56,7 +56,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bullseye common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common @@ -99,7 +99,7 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - pool: sonicbld + pool: sonicbld-1es sonic_slave: sonic-slave-bookworm common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common-bookworm diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp index 16a8111199..bb2420387c 100644 --- a/cfgmgr/fabricmgr.cpp +++ b/cfgmgr/fabricmgr.cpp @@ -41,6 +41,7 @@ void FabricMgr::doTask(Consumer &consumer) string monPollThreshRecovery, monPollThreshIsolation; string isolateStatus; string alias, lanes; + string enable; std::vector field_values; string value; @@ -66,6 +67,12 @@ void FabricMgr::doTask(Consumer &consumer) monPollThreshIsolation = fvValue(i); writeConfigToAppDb(key, "monPollThreshIsolation", monPollThreshIsolation); } + else if (fvField(i) == "monState") + { + SWSS_LOG_INFO("Enable fabric monitoring setting in appl_db."); + enable = fvValue(i); + writeConfigToAppDb(key, "monState", 
enable); + } else if (fvField(i) == "alias") { alias = fvValue(i); diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h index 1fd399fef9..afadd26d57 100644 --- a/cfgmgr/fabricmgr.h +++ b/cfgmgr/fabricmgr.h @@ -20,7 +20,7 @@ class FabricMgr : public Orch private: Table m_cfgFabricMonitorTable; Table m_cfgFabricPortTable; - Table m_appFabricMonitorTable; + ProducerStateTable m_appFabricMonitorTable; ProducerStateTable m_appFabricPortTable; void doTask(Consumer &consumer); diff --git a/lib/recorder.cpp b/lib/recorder.cpp index 449039adff..e9af745cdf 100644 --- a/lib/recorder.cpp +++ b/lib/recorder.cpp @@ -93,12 +93,12 @@ void RecWriter::record(const std::string& val) { return ; } - record_ofs << swss::getTimestamp() << "|" << val << std::endl; if (isRotate()) { setRotate(false); logfileReopen(); } + record_ofs << swss::getTimestamp() << "|" << val << std::endl; } diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index b9f6283fce..80a938e38e 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -101,7 +101,36 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vectorstart(); + bool fabricPortMonitor = checkFabricPortMonState(); + if (fabricPortMonitor) + { + m_debugTimer->start(); + SWSS_LOG_INFO("Fabric monitor starts at init time"); + } +} + +bool FabricPortsOrch::checkFabricPortMonState() +{ + bool enabled = false; + std::vector constValues; + bool setCfgVal = m_applMonitorConstTable->get("FABRIC_MONITOR_DATA", constValues); + if (!setCfgVal) + { + return enabled; + } + SWSS_LOG_INFO("FabricPortsOrch::checkFabricPortMonState starts"); + for (auto cv : constValues) + { + if (fvField(cv) == "monState") + { + if (fvValue(cv) == "enable") + { + enabled = true; + return enabled; + } + } + } + return enabled; } int FabricPortsOrch::getFabricPortList() @@ -516,6 +545,7 @@ void FabricPortsOrch::updateFabricDebugCounters() int autoIsolated = 0; int cfgIsolated = 0; int isolated = 0; + int origIsolated = 0; string lnkStatus = "down"; string testState = "product"; @@ -614,6 +644,12 @@ void FabricPortsOrch::updateFabricDebugCounters() if (fvField(val) == "AUTO_ISOLATED") { autoIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently autoisolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "ISOLATED") + { + origIsolated = to_uint(valuePt); SWSS_LOG_INFO("port %s currently isolated: %s", key.c_str(),valuePt.c_str()); continue; } @@ -787,6 +823,36 @@ void FabricPortsOrch::updateFabricDebugCounters() } // if "ISOLATED" is true, Call SAI api here to actually isolated the link // if "ISOLATED" is false, Call SAP api to actually unisolate the link + + if (origIsolated != isolated) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_FABRIC_ISOLATE; + bool setVal = false; + if (isolated == 1) + { + setVal = true; + } + attr.value.booldata = setVal; + SWSS_LOG_NOTICE("Set fabric port %d with isolate %d ", lane, isolated); + if (m_fabricLanePortMap.find(lane) == m_fabricLanePortMap.end()) + { + SWSS_LOG_NOTICE("NOT find fabric lane %d ", lane); + } + else + { + sai_status_t status = sai_port_api->set_port_attribute(m_fabricLanePortMap[lane], &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set admin status"); + } + SWSS_LOG_NOTICE("Set fabric port %d state done %d ", lane, isolated); + } + } + else + { + SWSS_LOG_INFO( "Same isolation status for %d", lane); + } } else { @@ -1188,7 +1254,12 @@ void FabricPortsOrch::doTask() void FabricPortsOrch::doFabricPortTask(Consumer &consumer) { - 
SWSS_LOG_NOTICE("FabricPortsOrch::doFabricPortTask"); + if (!checkFabricPortMonState()) + { + SWSS_LOG_INFO("doFabricPortTask returns early due to feature disabled"); + return; + } + SWSS_LOG_INFO("FabricPortsOrch::doFabricPortTask starts"); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -1275,9 +1346,6 @@ void FabricPortsOrch::doFabricPortTask(Consumer &consumer) } SWSS_LOG_NOTICE("key %s alias %s isolateStatus %s lanes %s", key.c_str(), alias.c_str(), isolateStatus.c_str(), lanes.c_str()); - // Call SAI api to isolate/unisolate the link here. - // Isolate the link if isolateStatus is True. - // Unisolate the link if isolateStatus is False. if (isolateStatus == "False") { @@ -1338,6 +1406,26 @@ void FabricPortsOrch::doFabricPortTask(Consumer &consumer) // AUTO_ISOLATED 0 m_stateTable->hset(state_key, "AUTO_ISOLATED", m_defaultAutoIsolated.c_str()); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_FABRIC_ISOLATE; + bool setVal = false; + attr.value.booldata = setVal; + SWSS_LOG_NOTICE("Set port %s to unisolate %s ", alias.c_str(), isolateStatus.c_str()); + int idx = stoi(lanes); + if (m_fabricLanePortMap.find(idx) == m_fabricLanePortMap.end()) + { + SWSS_LOG_NOTICE("NOT find %s alias. ", alias.c_str()); + } + else + { + sai_status_t status = sai_port_api->set_port_attribute(m_fabricLanePortMap[idx], &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set admin status"); + } + SWSS_LOG_NOTICE( "Set Port %s unisolation state done", alias.c_str()); + } } } } @@ -1350,11 +1438,38 @@ void FabricPortsOrch::doTask(Consumer &consumer) SWSS_LOG_NOTICE("doTask from FabricPortsOrch"); string table_name = consumer.getTableName(); + SWSS_LOG_INFO("Table name: %s", table_name.c_str()); if (table_name == APP_FABRIC_MONITOR_PORT_TABLE_NAME) { doFabricPortTask(consumer); } + if (table_name == APP_FABRIC_MONITOR_DATA_TABLE_NAME) + { + SWSS_LOG_INFO("doTask for APP_FABRIC_MONITOR_DATA_TABLE_NAME"); + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "monState") + { + if (fvValue(i) == "enable") + { + m_debugTimer->start(); + SWSS_LOG_INFO("debugTimer started"); + } + else + { + m_debugTimer->stop(); + SWSS_LOG_INFO("debugTimer stopped"); + } + } + } + it = consumer.m_toSync.erase(it); + } + } } void FabricPortsOrch::doTask(swss::SelectableTimer &timer) @@ -1384,6 +1499,7 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) if (m_getFabricPortListDone) { + SWSS_LOG_INFO("Fabric monitor enabled"); updateFabricDebugCounters(); updateFabricCapacity(); updateFabricRate(); diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index 3a7cb52f04..d94ece698e 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -65,6 +65,7 @@ class FabricPortsOrch : public Orch, public Subject void updateFabricPortState(); void updateFabricDebugCounters(); void updateFabricCapacity(); + bool checkFabricPortMonState(); void updateFabricRate(); void doTask() override; diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index a02c628fcf..dc4e797bea 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -715,7 +715,7 @@ void IntfsOrch::doTask(Consumer &consumer) bool mpls = false; string vlan = ""; string loopbackAction = ""; - + string oper_status =""; for (auto idx : data) { const auto &field = fvField(idx); @@ -807,6 +807,10 @@ void IntfsOrch::doTask(Consumer &consumer) { 
loopbackAction = value; } + else if (field == "oper_status") + { + oper_status = value; + } } if (alias == "eth0" || alias == "docker0") @@ -860,7 +864,19 @@ void IntfsOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); continue; } - + if(table_name == CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME) + { + if(isRemoteSystemPortIntf(alias)) + { + SWSS_LOG_INFO("Handle remote systemport intf %s, oper status %s", alias.c_str(), oper_status.c_str()); + bool isUp = (oper_status == "up") ? true : false; + if (!gNeighOrch->ifChangeInformRemoteNextHop(alias, isUp)) + { + SWSS_LOG_WARN("Unable to update the nexthop for port %s, oper status %s", alias.c_str(), oper_status.c_str()); + } + + } + } //Voq Inband interface config processing if(inband_type.size() && !ip_prefix_in_key) { @@ -1656,7 +1672,10 @@ void IntfsOrch::voqSyncAddIntf(string &alias) return; } - FieldValueTuple nullFv ("NULL", "NULL"); + + string oper_status = port.m_oper_status == SAI_PORT_OPER_STATUS_UP ? "up" : "down"; + + FieldValueTuple nullFv ("oper_status", oper_status); vector attrs; attrs.push_back(nullFv); @@ -1696,3 +1715,30 @@ void IntfsOrch::voqSyncDelIntf(string &alias) m_tableVoqSystemInterfaceTable->del(alias); } +void IntfsOrch::voqSyncIntfState(string &alias, bool isUp) +{ + Port port; + string port_alias; + if(gPortsOrch->getPort(alias, port)) + { + if (port.m_type == Port::LAG) + { + if (port.m_system_lag_info.switch_id != gVoqMySwitchId) + { + return; + } + port_alias = port.m_system_lag_info.alias; + } + else + { + if(port.m_system_port_info.type == SAI_SYSTEM_PORT_TYPE_REMOTE) + { + return; + } + port_alias = port.m_system_port_info.alias; + } + SWSS_LOG_NOTICE("Syncing system interface state %s for port %s", isUp ? "up" : "down", port_alias.c_str()); + m_tableVoqSystemInterfaceTable->hset(port_alias, "oper_status", isUp ? "up" : "down"); + } + +} \ No newline at end of file diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index 71d89be725..aa5129bef4 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -72,6 +72,7 @@ class IntfsOrch : public Orch bool isRemoteSystemPortIntf(string alias); bool isLocalSystemPortIntf(string alias); + void voqSyncIntfState(string &alias, bool); private: diff --git a/orchagent/main.cpp b/orchagent/main.cpp index ad03648a7d..0a804eb38c 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -588,6 +588,28 @@ int main(int argc, char **argv) attr.value.u32 = SAI_SWITCH_TYPE_FABRIC; attrs.push_back(attr); + //Read switch_id from config_db. 
+ Table cfgDeviceMetaDataTable(&config_db, CFG_DEVICE_METADATA_TABLE_NAME); + string value; + if (cfgDeviceMetaDataTable.hget("localhost", "switch_id", value)) + { + if (value.size()) + { + gVoqMySwitchId = stoi(value); + } + + if (gVoqMySwitchId < 0) + { + SWSS_LOG_ERROR("Invalid fabric switch id %d configured", gVoqMySwitchId); + exit(EXIT_FAILURE); + } + } + else + { + SWSS_LOG_ERROR("Fabric switch id is not configured"); + exit(EXIT_FAILURE); + } + attr.id = SAI_SWITCH_ATTR_SWITCH_ID; attr.value.u32 = gVoqMySwitchId; attrs.push_back(attr); diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index ea3ade347c..ce6bf2baf2 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -744,6 +744,8 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -751,13 +753,21 @@ bool MuxNbrHandler::enable(bool update_rt) SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str()); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->enableNeighbor(neigh)) - { - SWSS_LOG_INFO("Enabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); + it++; + } + + if (!gNeighOrch->enableNeighbors(neigh_ctx_list)) + { + return false; + } + it = neighbors_.begin(); + while (it != neighbors_.end()) + { /* Update NH to point to learned neighbor */ + neigh = NeighborEntry(it->first, alias_); it->second = gNeighOrch->getLocalNextHopId(neigh); /* Reprogram route */ @@ -795,22 +805,26 @@ bool MuxNbrHandler::enable(bool update_rt) IpPrefix pfx = it->first.to_string(); if (update_rt) { - if (remove_route(pfx) != SAI_STATUS_SUCCESS) - { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx)); updateTunnelRoute(nh_key, false); } it++; } + if (update_rt && !removeRoutes(route_ctx_list)) + { + return false; + } + return true; } bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -852,21 +866,25 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); - if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) - { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second)); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->disableNeighbor(neigh)) - { - SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); it++; } + if (!addRoutes(route_ctx_list)) + { + return false; + } + + if (!gNeighOrch->disableNeighbors(neigh_ctx_list)) + { + return false; + } + return true; } @@ -881,6 +899,141 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } +bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + 
copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Adding route entry %s, nh %" PRIx64 " to bulker", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); + + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = ctx->nh; + attrs.push_back(attr); + + status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_INFO("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", + ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + gRouteBulker.clear(); + return ret; +} + +bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Removing route entry %s, nh %" PRIx64 "", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_INFO("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", + ctx->pfx.getIp().to_string().c_str(), status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + 
gRouteBulker.clear(); + return ret; +} + void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) { MuxOrch* mux_orch = gDirectory.get(); diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 22f01ce27d..3a6d165db4 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -10,6 +10,7 @@ #include "tunneldecaporch.h" #include "aclorch.h" #include "neighorch.h" +#include "bulker.h" enum MuxState { @@ -35,6 +36,26 @@ enum MuxCableType ACTIVE_ACTIVE }; +struct MuxRouteBulkContext +{ + std::deque object_statuses; // Bulk statuses + IpPrefix pfx; // Route prefix + sai_object_id_t nh; // nexthop id + + MuxRouteBulkContext(IpPrefix pfx) + : pfx(pfx) + { + } + + MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh) + : pfx(pfx), nh(nh) + { + } +}; + +extern size_t gMaxBulkSize; +extern sai_route_api_t* sai_route_api; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -64,7 +85,7 @@ typedef std::map MuxNeighbor; class MuxNbrHandler { public: - MuxNbrHandler() = default; + MuxNbrHandler() : gRouteBulker(sai_route_api, gMaxBulkSize) {}; bool enable(bool update_rt); bool disable(sai_object_id_t); @@ -75,11 +96,15 @@ class MuxNbrHandler string getAlias() const { return alias_; }; private: + bool removeRoutes(std::list& bulk_ctx_list); + bool addRoutes(std::list& bulk_ctx_list); + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; string alias_; + EntityBulker gRouteBulker; }; // Mux Cable object diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index a2bdebbc62..df96405791 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -22,10 +22,12 @@ extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern BfdOrch *gBfdOrch; +extern size_t gMaxBulkSize; const int neighorch_pri = 30; NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, FdbOrch *fdbOrch, PortsOrch *portsOrch, DBConnector *chassisAppDb) : + gNeighBulker(sai_neighbor_api, gMaxBulkSize), Orch(appDb, tableName, neighorch_pri), m_intfsOrch(intfsOrch), m_fdbOrch(fdbOrch), @@ -340,6 +342,8 @@ bool NeighOrch::setNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_flag auto nhop = m_syncdNextHops.find(nexthop); bool rc = false; + SWSS_LOG_INFO("setNextHopFlag on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); assert(nhop != m_syncdNextHops.end()); if (nhop->second.nh_flags & nh_flag) @@ -379,6 +383,8 @@ bool NeighOrch::clearNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_fl nhop->second.nh_flags &= ~nh_flag; uint32_t count; + SWSS_LOG_INFO("clearnexthop on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); switch (nh_flag) { case NHFLAGS_IFDOWN: @@ -789,6 +795,8 @@ void NeighOrch::doTask(Consumer &consumer) NeighborEntry neighbor_entry = { ip_address, alias }; + NeighborContext ctx = NeighborContext(neighbor_entry); + if (op == SET_COMMAND) { Port p; @@ -814,6 +822,8 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } + ctx.mac = mac_address; + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { @@ -842,7 +852,7 @@ void NeighOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } - else if (addNeighbor(neighbor_entry, mac_address)) + else if (addNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -873,7 +883,7 @@ void 
NeighOrch::doTask(Consumer &consumer) { if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { - if (removeNeighbor(neighbor_entry)) + if (removeNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -894,13 +904,18 @@ void NeighOrch::doTask(Consumer &consumer) } } -bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress &macAddress) +bool NeighOrch::addNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; IpAddress ip_address = neighborEntry.ip_address; string alias = neighborEntry.alias; + bool bulk_op = ctx.bulk_op; sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -945,12 +960,37 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress NeighborEntry temp_entry = { ip_address, vlan_port }; if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) { - SWSS_LOG_NOTICE("Neighbor %s on %s already exists, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); - if (!removeNeighbor(temp_entry)) + // Neighbor already exists on another VLAN. If they belong to the same VRF, delete the old neighbor + Port existing_vlan, new_vlan; + if (!gPortsOrch->getPort(vlan_port, new_vlan)) { - SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + SWSS_LOG_ERROR("Failed to get port for %s", vlan_port.c_str()); return false; } + if (!gPortsOrch->getPort(alias, existing_vlan)) + { + SWSS_LOG_ERROR("Failed to get port for %s", alias.c_str()); + return false; + } + if (existing_vlan.m_vr_id == new_vlan.m_vr_id) + { + std::string vrf_name = gDirectory.get()->getVRFname(existing_vlan.m_vr_id); + if (vrf_name.empty()) + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); + } + else + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s in VRF %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str(), vrf_name.c_str()); + } + + NeighborContext removeContext = NeighborContext(temp_entry); + if (!removeNeighbor(removeContext)) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + return false; + } + } } } @@ -967,6 +1007,15 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) { + // Using bulker, return and post-process later + if (bulk_op) + { + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + object_statuses.emplace_back(); + gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); + return true; + } + status = sai_neighbor_api->create_neighbor_entry(&neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -1063,13 +1112,17 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress return true; } -bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) +bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) { SWSS_LOG_ENTER(); sai_status_t status; - IpAddress ip_address = neighborEntry.ip_address; + auto& 
object_statuses = ctx.object_statuses; + + const NeighborEntry neighborEntry = ctx.neighborEntry; string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + bool bulk_op = ctx.bulk_op; NextHopKey nexthop = { ip_address, alias }; if(m_intfsOrch->isRemoteSystemPortIntf(alias)) @@ -1140,6 +1193,13 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) SWSS_LOG_NOTICE("Removed next hop %s on %s", ip_address.to_string().c_str(), alias.c_str()); + if (bulk_op) + { + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); + return true; + } + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); if (status != SAI_STATUS_SUCCESS) { @@ -1199,6 +1259,185 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) return true; } +/* Process bulk ctx entry and enable the neigbor */ +bool NeighOrch::processBulkEnableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (!ctx.bulk_op) + { + SWSS_LOG_INFO("Not a bulk entry for %s on %s", ip_address.to_string().c_str(), alias.c_str()); + return true; + } + + SWSS_LOG_INFO("Checking neighbor create entry status %s on %s.", ip_address.to_string().c_str(), alias.c_str()); + + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + if (rif_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); + return false; + } + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + MuxOrch* mux_orch = gDirectory.get(); + if (mux_orch->isNeighborActive(ip_address, macAddress, alias)) + { + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_INFO("Neighbor exists: neighbor %s on %s, skipping: status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + return true; + } + else + { + SWSS_LOG_ERROR("Failed to create neighbor %s on %s, status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + SWSS_LOG_NOTICE("Created neighbor ip %s, %s on %s", ip_address.to_string().c_str(), + macAddress.to_string().c_str(), alias.c_str()); + + m_intfsOrch->increaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + if (!addNextHop(NextHopKey(ip_address, alias))) + { + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return 
parseHandleSaiStatusFailure(handle_status); + } + } + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + return false; + } + } + + m_syncdNeighbors[neighborEntry] = { macAddress, true }; + + NeighborUpdate update = { neighborEntry, macAddress, true }; + notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + return true; +} + +/* Process bulk ctx entry and disable the neigbor */ +bool NeighOrch::processBulkDisableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + return true; + } + + SWSS_LOG_INFO("Checking neighbor remove entry status %s on %s.", ip_address.to_string().c_str(), m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); + + if (isHwConfigured(neighborEntry)) + { + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Bulk remove entry skipped, neighbor %s on %s already removed, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + } + else + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + else + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } + } + + /* Do not delete entry from cache for disable request */ + m_syncdNeighbors[neighborEntry].hw_configured = false; + return true; +} + bool NeighOrch::isHwConfigured(const NeighborEntry& neighborEntry) { if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) @@ -1225,7 +1464,11 @@ bool NeighOrch::enableNeighbor(const NeighborEntry& neighborEntry) return true; } - return addNeighbor(neighborEntry, m_syncdNeighbors[neighborEntry].mac); + NeighborEntry neigh = neighborEntry; + NeighborContext ctx = NeighborContext(neigh); + ctx.mac = m_syncdNeighbors[neighborEntry].mac; + + return addNeighbor(ctx); } bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) @@ -1244,7 +1487,108 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return true; } - return removeNeighbor(neighborEntry, true); + NeighborContext ctx = NeighborContext(neighborEntry); + + return removeNeighbor(ctx, true); +} + 
+/* enable neighbors using bulker */ +bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + if (isHwConfigured(neighborEntry)) + { + SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!addNeighbor(*ctx)) + { + SWSS_LOG_ERROR("Neighbor %s create entry failed.", neighborEntry.ip_address.to_string().c_str()); + continue; + } + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkEnableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; +} + +/* disable neighbors using bulker */ +bool NeighOrch::disableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!removeNeighbor(*ctx, true)) + { + SWSS_LOG_ERROR("Neighbor %s remove entry failed.", neighborEntry.ip_address.to_string().c_str()); + } + } + + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkDisableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Disable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries but return false */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; } sai_object_id_t NeighOrch::addTunnelNextHop(const NextHopKey& nh) @@ -1426,7 +1770,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_NOTICE("VOQ encap index set failed for neighbor %s. Removing and re-adding", kfvKey(t).c_str()); //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -1457,7 +1802,9 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) } //Add neigh to SAI - if (addNeighbor(neighbor_entry, mac_address)) + NeighborContext ctx = NeighborContext(neighbor_entry); + ctx.mac = mac_address; + if (addNeighbor(ctx)) { //neigh successfully added to SAI. 
Set STATE DB to signal kernel programming by neighbor manager @@ -1510,7 +1857,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -1877,3 +2225,30 @@ bool NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddre return false; } + +bool NeighOrch::ifChangeInformRemoteNextHop(const string &alias, bool if_up) +{ + SWSS_LOG_ENTER(); + bool rc = true; + Port inbp; + gPortsOrch->getInbandPort(inbp); + for (auto nbr = m_syncdNeighbors.begin(); nbr != m_syncdNeighbors.end(); ++nbr) + { + if (nbr->first.alias != alias) + { + continue; + } + SWSS_LOG_INFO("Found remote Neighbor %s on %s", nbr->first.ip_address.to_string().c_str(), alias.c_str()); + NextHopKey nhop = { nbr->first.ip_address, inbp.m_alias }; + + if (if_up) + { + rc = clearNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + else + { + rc = setNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + } + return rc; +} \ No newline at end of file diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index e72979ad07..0b59181db1 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -12,6 +12,7 @@ #include "producerstatetable.h" #include "schema.h" #include "bfdorch.h" +#include "bulker.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -43,6 +44,27 @@ struct NeighborUpdate bool add; }; +/* + * Keeps track of neighbor entry information primarily for bulk operations + */ +struct NeighborContext +{ + NeighborEntry neighborEntry; // neighbor entry to process + std::deque object_statuses; // bulk statuses + MacAddress mac; // neighbor mac + bool bulk_op = false; // use bulker (only for mux use for now) + + NeighborContext(NeighborEntry neighborEntry) + : neighborEntry(neighborEntry) + { + } + + NeighborContext(NeighborEntry neighborEntry, bool bulk_op) + : neighborEntry(neighborEntry), bulk_op(bulk_op) + { + } +}; + class NeighOrch : public Orch, public Subject, public Observer { public: @@ -66,12 +88,15 @@ class NeighOrch : public Orch, public Subject, public Observer bool enableNeighbor(const NeighborEntry&); bool disableNeighbor(const NeighborEntry&); + bool enableNeighbors(std::list&); + bool disableNeighbors(std::list&); bool isHwConfigured(const NeighborEntry&); sai_object_id_t addTunnelNextHop(const NextHopKey&); bool removeTunnelNextHop(const NextHopKey&); bool ifChangeInformNextHop(const string &, bool); + bool isNextHopFlagSet(const NextHopKey &, const uint32_t); bool removeOverlayNextHop(const NextHopKey &); void update(SubjectType, void *); @@ -81,6 +106,7 @@ class NeighOrch : public Orch, public Subject, public Observer void resolveNeighbor(const NeighborEntry &); void updateSrv6Nexthop(const NextHopKey &, const sai_object_id_t &); + bool ifChangeInformRemoteNextHop(const string &, bool); private: PortsOrch *m_portsOrch; @@ -93,10 +119,14 @@ class NeighOrch : public Orch, public Subject, public Observer std::set m_neighborToResolve; + EntityBulker gNeighBulker; + bool removeNextHop(const IpAddress&, const string&); - bool addNeighbor(const NeighborEntry&, const MacAddress&); - bool removeNeighbor(const NeighborEntry&, bool disable = false); + bool addNeighbor(NeighborContext& ctx); + bool removeNeighbor(NeighborContext& ctx, bool disable = false); 
+ bool processBulkEnableNeighbor(NeighborContext& ctx); + bool processBulkDisableNeighbor(NeighborContext& ctx); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 3fc44bf81a..d5bda136fb 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -20,6 +20,7 @@ using namespace swss; #define PFC_WD_POLL_MSECS 100 #define APP_FABRIC_MONITOR_PORT_TABLE_NAME "FABRIC_PORT_TABLE" +#define APP_FABRIC_MONITOR_DATA_TABLE_NAME "FABRIC_MONITOR_TABLE" /* orchagent heart beat message interval */ #define HEART_BEAT_INTERVAL_MSECS 10 * 1000 @@ -527,7 +528,8 @@ bool OrchDaemon::init() // register APP_FABRIC_MONITOR_PORT_TABLE_NAME table const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri } + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables, m_fabricPortStatEnabled, m_fabricQueueStatEnabled); m_orchList.push_back(gFabricPortsOrch); @@ -1088,7 +1090,8 @@ bool FabricOrchDaemon::init() const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri } + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables); addOrchList(gFabricPortsOrch); diff --git a/orchagent/p4orch/tests/mock_sai_neighbor.h b/orchagent/p4orch/tests/mock_sai_neighbor.h index cd8f2aa0a9..4355831d36 100644 --- a/orchagent/p4orch/tests/mock_sai_neighbor.h +++ b/orchagent/p4orch/tests/mock_sai_neighbor.h @@ -16,6 +16,12 @@ class MockSaiNeighbor MOCK_METHOD1(remove_neighbor_entry, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry)); + MOCK_METHOD6(create_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses)); + MOCK_METHOD2(set_neighbor_entry_attribute, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr)); @@ -37,6 +43,18 @@ sai_status_t mock_remove_neighbor_entry(_In_ const sai_neighbor_entry_t *neighbo return mock_sai_neighbor->remove_neighbor_entry(neighbor_entry); } +sai_status_t mock_create_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->create_neighbor_entries(object_count, neighbor_entry, attr_count, attr_list, mode, object_statuses); +} + +sai_status_t mock_remove_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->remove_neighbor_entries(object_count, neighbor_entry, mode, object_statuses); +} + sai_status_t 
mock_set_neighbor_entry_attribute(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr) { diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index 4db1db873e..7523701cb7 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -124,6 +124,8 @@ class NeighborManagerTest : public ::testing::Test mock_sai_neighbor = &mock_sai_neighbor_; sai_neighbor_api->create_neighbor_entry = mock_create_neighbor_entry; sai_neighbor_api->remove_neighbor_entry = mock_remove_neighbor_entry; + sai_neighbor_api->create_neighbor_entries = mock_create_neighbor_entries; + sai_neighbor_api->remove_neighbor_entries = mock_remove_neighbor_entries; sai_neighbor_api->set_neighbor_entry_attribute = mock_set_neighbor_entry_attribute; sai_neighbor_api->get_neighbor_entry_attribute = mock_get_neighbor_entry_attribute; } diff --git a/orchagent/port.h b/orchagent/port.h index d153b20318..0ae9b97b67 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -13,6 +13,7 @@ extern "C" { #include #include +#include #define DEFAULT_PORT_VLAN_ID 1 /* @@ -212,6 +213,14 @@ class Port /* Path Tracing */ uint16_t m_pt_intf_id = 0; sai_port_path_tracing_timestamp_type_t m_pt_timestamp_template = SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23; + + /* link event damping */ + sai_redis_link_event_damping_algorithm_t m_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + uint32_t m_max_suppress_time = 0; + uint32_t m_decay_half_life = 0; + uint32_t m_suppress_threshold = 0; + uint32_t m_reuse_threshold = 0; + uint32_t m_flap_penalty = 0; }; } diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h index 9e3e63f9b7..33d52231cc 100644 --- a/orchagent/port/portcnt.h +++ b/orchagent/port/portcnt.h @@ -217,6 +217,40 @@ class PortConfig final bool is_set = false; } pt_timestamp_template; // Port timestamp template for Path Tracing + struct { + sai_redis_link_event_damping_algorithm_t value; + bool is_set = false; + } link_event_damping_algorithm; // Port link event damping algorithm + + struct { + + struct { + uint32_t value; + bool is_set = false; + } max_suppress_time; // Max suppress time + + struct { + uint32_t value; + bool is_set = false; + } decay_half_life; // Decay half life + + struct { + uint32_t value; + bool is_set = false; + } suppress_threshold; // Suppress threshold + + struct { + uint32_t value; + bool is_set = false; + } reuse_threshold; // Reuse threshold + + struct { + uint32_t value; + bool is_set = false; + } flap_penalty; // Flap penalty + + } link_event_damping_config; // Port link event damping config + std::string key; std::string op; diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp index 7ac9c15c52..181fef9f69 100644 --- a/orchagent/port/porthlpr.cpp +++ b/orchagent/port/porthlpr.cpp @@ -21,6 +21,7 @@ using namespace swss; // types -------------------------------------------------------------------------------------------------------------- typedef decltype(PortConfig::serdes) PortSerdes_t; +typedef decltype(PortConfig::link_event_damping_config) PortDampingConfig_t; // constants ---------------------------------------------------------------------------------------------------------- @@ -126,6 +127,12 @@ static const std::unordered_map g_linkEventDampingAlgorithmMap = +{ + { "disabled", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED }, + { "aied", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED } +}; + // functions 
---------------------------------------------------------------------------------------------------------- template @@ -246,6 +253,11 @@ std::string PortHelper::getPtTimestampTemplateStr(const PortConfig &port) const return this->getFieldValueStr(port, PORT_PT_TIMESTAMP_TEMPLATE); } +std::string PortHelper::getDampingAlgorithm(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_DAMPING_ALGO); +} + bool PortHelper::parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -786,6 +798,60 @@ bool PortHelper::parsePortSubport(PortConfig &port, const std::string &field, co return true; } +bool PortHelper::parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = g_linkEventDampingAlgorithmMap.find(value); + if (cit == g_linkEventDampingAlgorithmMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.link_event_damping_algorithm.value = cit->second; + port.link_event_damping_algorithm.is_set = true; + + return true; +} + +template +bool PortHelper::parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + try + { + damping_config_attr.value = to_uint(value); + damping_config_attr.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::max_suppress_time) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::decay_half_life) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::suppress_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::reuse_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::flap_penalty) &damping_config_attr, const std::string &field, const std::string &value) const; + bool PortHelper::parsePortPtIntfId(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -1121,6 +1187,48 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_DAMPING_ALGO) + { + if (!this->parsePortLinkEventDampingAlgorithm(port, field, value)) + { + return false; + } + } + else if (field == PORT_MAX_SUPPRESS_TIME) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.max_suppress_time, field, value)) + { + return false; + } + } + else if (field == PORT_DECAY_HALF_LIFE) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.decay_half_life, field, value)) + { + return false; + 
} + } + else if (field == PORT_SUPPRESS_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.suppress_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_REUSE_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.reuse_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_FLAP_PENALTY) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.flap_penalty, field, value)) + { + return false; + } + } else { SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); diff --git a/orchagent/port/porthlpr.h b/orchagent/port/porthlpr.h index 3852759975..45a4893a39 100644 --- a/orchagent/port/porthlpr.h +++ b/orchagent/port/porthlpr.h @@ -27,6 +27,7 @@ class PortHelper final std::string getLinkTrainingStr(const PortConfig &port) const; std::string getAdminStatusStr(const PortConfig &port) const; std::string getPtTimestampTemplateStr(const PortConfig &port) const; + std::string getDampingAlgorithm(const PortConfig &port) const; bool parsePortConfig(PortConfig &port) const; bool validatePortConfig(PortConfig &port) const; @@ -37,6 +38,10 @@ class PortHelper final template bool parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const; + bool parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const; + template + bool parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const; + bool parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const; diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h index c9a3274913..8dd7f79200 100644 --- a/orchagent/port/portschema.h +++ b/orchagent/port/portschema.h @@ -95,3 +95,9 @@ #define PORT_SUBPORT "subport" #define PORT_PT_INTF_ID "pt_interface_id" #define PORT_PT_TIMESTAMP_TEMPLATE "pt_timestamp_template" +#define PORT_DAMPING_ALGO "link_event_damping_algorithm" +#define PORT_MAX_SUPPRESS_TIME "max_suppress_time" +#define PORT_DECAY_HALF_LIFE "decay_half_life" +#define PORT_SUPPRESS_THRESHOLD "suppress_threshold" +#define PORT_REUSE_THRESHOLD "reuse_threshold" +#define PORT_FLAP_PENALTY "flap_penalty" diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 56f6ae7278..799a57be57 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -3172,6 +3172,49 @@ task_process_status PortsOrch::setPortLinkTraining(const Port &port, bool state) return task_success; } +ReturnCode PortsOrch::setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM; + attr.value.s32 = link_event_damping_algorithm; + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), + "Failed to set link event damping algorithm (" << link_event_damping_algorithm << ") for port " + << port.m_alias); + + SWSS_LOG_INFO("Set link event damping algorithm %u for port %s", link_event_damping_algorithm, port.m_alias.c_str()); + return ReturnCode(); +} + +ReturnCode PortsOrch::setPortLinkEventDampingAiedConfig(Port &port, + 
sai_redis_link_event_damping_algo_aied_config_t &config) { + + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG; + attr.value.ptr = (void *) &config; + + std::stringstream msg; + msg << "link event damping algorithm aied config for port " << port.m_alias << " - "; + msg << "max_suppress_time: " << config.max_suppress_time << ", "; + msg << "decay_half_life: " << config.decay_half_life << ", "; + msg << "suppress_threshold: " << config.suppress_threshold << ", "; + msg << "reuse_threshold: " << config.reuse_threshold << ", "; + msg << "flap_penalty: " << config.flap_penalty; + + std::string msg_str = msg.str(); + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), "Failed to set " + msg_str); + + SWSS_LOG_INFO("Set %s", msg_str.c_str()); + + return ReturnCode(); +} + bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const { SWSS_LOG_ENTER(); @@ -4034,6 +4077,86 @@ void PortsOrch::doPortTask(Consumer &consumer) } } + if (pCfg.link_event_damping_algorithm.is_set) + { + if (p.m_link_event_damping_algorithm != pCfg.link_event_damping_algorithm.value) + { + auto status = setPortLinkEventDampingAlgorithm(p, pCfg.link_event_damping_algorithm.value); + if (!status.ok()) + { + SWSS_LOG_ERROR( + "Failed to set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + it = taskMap.erase(it); + continue; + } + + p.m_link_event_damping_algorithm = pCfg.link_event_damping_algorithm.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + } + } + + sai_redis_link_event_damping_algo_aied_config_t aied_config = { + p.m_max_suppress_time, + p.m_suppress_threshold, + p.m_reuse_threshold, + p.m_decay_half_life, + p.m_flap_penalty, + }; + + if (pCfg.link_event_damping_config.max_suppress_time.is_set) + { + aied_config.max_suppress_time = pCfg.link_event_damping_config.max_suppress_time.value; + } + if (pCfg.link_event_damping_config.decay_half_life.is_set) + { + aied_config.decay_half_life = pCfg.link_event_damping_config.decay_half_life.value; + } + if (pCfg.link_event_damping_config.suppress_threshold.is_set) + { + aied_config.suppress_threshold = pCfg.link_event_damping_config.suppress_threshold.value; + } + if (pCfg.link_event_damping_config.reuse_threshold.is_set) + { + aied_config.reuse_threshold = pCfg.link_event_damping_config.reuse_threshold.value; + } + if (pCfg.link_event_damping_config.flap_penalty.is_set) + { + aied_config.flap_penalty = pCfg.link_event_damping_config.flap_penalty.value; + } + + bool config_changed = !(aied_config.max_suppress_time == p.m_max_suppress_time && + aied_config.decay_half_life == p.m_decay_half_life && + aied_config.suppress_threshold == p.m_suppress_threshold && + aied_config.reuse_threshold == p.m_reuse_threshold && + aied_config.flap_penalty == p.m_flap_penalty); + + if (config_changed) + { + auto status = setPortLinkEventDampingAiedConfig(p, aied_config); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to set port %s link event damping config", p.m_alias.c_str()); + it = taskMap.erase(it); + continue; + } + + p.m_max_suppress_time = aied_config.max_suppress_time; + p.m_decay_half_life = aied_config.decay_half_life; + p.m_suppress_threshold = aied_config.suppress_threshold; + p.m_reuse_threshold = aied_config.reuse_threshold; + p.m_flap_penalty = 
aied_config.flap_penalty; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE("Set port %s link event damping config successfully", p.m_alias.c_str()); + } + if (pCfg.speed.is_set) { if (p.m_speed != pCfg.speed.value) @@ -7990,6 +8113,8 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) isUp ? "up" : "down"); } } + SWSS_LOG_INFO("Updating the nexthop for port %s and operational status %s", port.m_alias.c_str(), isUp ? "up" : "down"); + if (!gNeighOrch->ifChangeInformNextHop(port.m_alias, isUp)) { SWSS_LOG_WARN("Inform nexthop operation failed for interface %s", port.m_alias.c_str()); @@ -8002,6 +8127,15 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) } } + if(gMySwitchType == "voq") + { + if (gIntfsOrch->isLocalSystemPortIntf(port.m_alias)) + { + gIntfsOrch->voqSyncIntfState(port.m_alias, isUp); + } + } + + PortOperStateUpdate update = {port, status}; notify(SUBJECT_TYPE_PORT_OPER_STATE_CHANGE, static_cast(&update)); } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index bf3d94ad3a..fa149694fe 100644 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -463,6 +463,11 @@ class PortsOrch : public Orch, public Subject task_process_status setPortAdvInterfaceTypes(Port &port, std::set &interface_types); task_process_status setPortLinkTraining(const Port& port, bool state); + ReturnCode setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm); + ReturnCode setPortLinkEventDampingAiedConfig(Port &port, + sai_redis_link_event_damping_algo_aied_config_t &config); + void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 90fc6fc766..21cb11c5db 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -2018,6 +2018,7 @@ task_process_status QosOrch::handleGlobalQosMap(const string &OP, KeyOpFieldsVal { SWSS_LOG_INFO("Global QoS map %s is not yet created", map_name.c_str()); task_status = task_process_status::task_need_retry; + continue; } if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, id)) diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index b976c728a7..5c482d726d 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -21,6 +21,7 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include "tunneldecaporch.h" #include "flowcounterrouteorch.h" extern sai_virtual_router_api_t* sai_virtual_router_api; @@ -43,6 +44,7 @@ extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; extern SwitchOrch *gSwitchOrch; +extern TunnelDecapOrch *gTunneldecapOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -334,7 +336,7 @@ VNetVrfObject::~VNetVrfObject() set vr_ent = getVRids(); for (auto it : vr_ent) { - if (it != gVirtualRouterId) + if (it != gVirtualRouterId) { sai_status_t status = sai_virtual_router_api->remove_virtual_router(it); if (status != SAI_STATUS_SUCCESS) @@ -717,7 +719,8 @@ static bool update_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_obj } VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *vnetOrch) - : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME) + : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME), + app_tunnel_decap_term_producer_(db, 
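On the qosorch.cpp hunk above: the added continue is the substance of the fix. task_need_retry only records that the iteration should be retried; without continue, the loop body falls through and calls applyDscpToTcMapToSwitch with a map id that was never resolved. A stripped-down model of the intended control flow (toy types, not the QosOrch code):

#include <iostream>
#include <vector>

enum class task_process_status { task_success, task_need_retry };

int main()
{
    // Illustrative only: mirrors why `continue` must follow task_need_retry -
    // the rest of the iteration must not run with an unresolved map id.
    std::vector<bool> map_ready = {false, true};
    task_process_status task_status = task_process_status::task_success;

    for (bool ready : map_ready)
    {
        if (!ready)
        {
            task_status = task_process_status::task_need_retry;
            continue; // skip applying a map that was never resolved
        }
        std::cout << "apply map to switch\n";
    }
    return task_status == task_process_status::task_need_retry ? 0 : 1;
}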
APP_TUNNEL_DECAP_TERM_TABLE_NAME) { SWSS_LOG_ENTER(); @@ -1432,6 +1435,39 @@ bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, return true; } +inline void VNetRouteOrch::createSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + if (!config.enable || subnet_decap_terms_created_.find(ipPrefix) != subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Add subnet decap term for %s", ipPrefix.to_string().c_str()); + static const vector data = { + {"term_type", "MP2MP"}, + {"subnet_type", "vip"} + }; + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.set(key, data); + subnet_decap_terms_created_.insert(ipPrefix); +} + +inline void VNetRouteOrch::removeSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + auto it = subnet_decap_terms_created_.find(ipPrefix); + if (it == subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Remove subnet decap term for %s", ipPrefix.to_string().c_str()); + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.del(key); + subnet_decap_terms_created_.erase(it); +} + template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op) @@ -2088,6 +2124,14 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH removeRouteAdvertisement(prefix_to_use); } } + if (route_state == "active") + { + createSubnetDecapTerm(prefix_to_use); + } + else if (route_state == "inactive") + { + removeSubnetDecapTerm(prefix_to_use); + } } void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) @@ -2101,11 +2145,13 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) if(adv_prefix_refcount_[adv_pfx] == 1) { removeRouteAdvertisement(adv_pfx); + removeSubnetDecapTerm(adv_pfx); } } else { removeRouteAdvertisement(ipPrefix); + removeSubnetDecapTerm(ipPrefix); } } diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 0cffa115fd..e2ba25d0a5 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -463,6 +463,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void updateVnetTunnel(const BfdUpdate&); void updateVnetTunnelCustomMonitor(const MonitorUpdate& update); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + void createSubnetDecapTerm(const IpPrefix &ipPrefix); + void removeSubnetDecapTerm(const IpPrefix &ipPrefix); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, @@ -485,7 +487,9 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map nexthop_info_; std::map prefix_to_adv_prefix_; std::map adv_prefix_refcount_; + std::set subnet_decap_terms_created_; ProducerStateTable bfd_session_producer_; + ProducerStateTable app_tunnel_decap_term_producer_; unique_ptr monitor_session_producer_; shared_ptr state_db_; shared_ptr app_db_; diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index 553c0d7710..6724698289 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -109,7 +109,19 @@ def delete_field(self, table_name: str, key: str, 
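The createSubnetDecapTerm/removeSubnetDecapTerm pair above is deliberately idempotent: the subnet_decap_terms_created_ set guarantees at most one APP DB write per prefix, however many times a route is reported active. A toy model of that guard; the tunnel name "IPINIP_SUBNET" is invented for the example, the real one comes from SubnetDecapConfig:

#include <iostream>
#include <set>
#include <string>

// Sketch of the create-side guard: one APP DB key per prefix, tracked in a
// local set so repeated "active" route states do not re-publish.
// Key layout follows the PR: "<tunnel>:<prefix>".
int main()
{
    std::set<std::string> created;
    auto createTerm = [&](const std::string &tunnel, const std::string &prefix) {
        std::string key = tunnel + ":" + prefix;
        if (created.insert(key).second)
        {
            std::cout << "SET " << key << " term_type=MP2MP subnet_type=vip\n";
        }
    };
    createTerm("IPINIP_SUBNET", "10.1.0.0/24");
    createTerm("IPINIP_SUBNET", "10.1.0.0/24"); // no-op on the second call
    return 0;
}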
field: str) -> None: """ table = swsscommon.Table(self.db_connection, table_name) table.hdel(key, field) - + def set_field(self, table_name: str, key: str, field: str, value: str) -> None: + """Add/Update a field in an entry stored at `key` in the specified table. + + Args: + table_name: The name of the table where the entry is stored. + key: The key that maps to the entry being added/updated. + field: The field that needs to be added/updated. + value: The value that is set for the field. + """ + table = swsscommon.Table(self.db_connection, table_name) + table.hset(key, field, value) + def get_keys(self, table_name: str) -> List[str]: """Get all of the keys stored in the specified table. diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 8005199935..4a92d65c80 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -24,6 +24,7 @@ extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; extern sai_route_api_t *sai_route_api; +extern sai_neighbor_api_t *sai_neighbor_api; extern sai_mpls_api_t *sai_mpls_api; extern sai_next_hop_group_api_t* sai_next_hop_group_api; extern string gMySwitchType; @@ -318,6 +319,7 @@ namespace aclorch_test sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); sai_api_query(SAI_API_ROUTE, (void **)&sai_route_api); + sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); sai_api_query(SAI_API_MPLS, (void **)&sai_mpls_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); @@ -490,6 +492,7 @@ namespace aclorch_test sai_vlan_api = nullptr; sai_bridge_api = nullptr; sai_route_api = nullptr; + sai_neighbor_api = nullptr; sai_mpls_api = nullptr; } diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index 6210cc0969..dc5ad78776 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -2,6 +2,7 @@ #include "bulker.h" extern sai_route_api_t *sai_route_api; +extern sai_neighbor_api_t *sai_neighbor_api; namespace bulker_test { @@ -17,12 +18,18 @@ namespace bulker_test { ASSERT_EQ(sai_route_api, nullptr); sai_route_api = new sai_route_api_t(); + + ASSERT_EQ(sai_neighbor_api, nullptr); + sai_neighbor_api = new sai_neighbor_api_t(); } void TearDown() override { delete sai_route_api; sai_route_api = nullptr; + + delete sai_neighbor_api; + sai_neighbor_api = nullptr; } }; @@ -142,4 +149,28 @@ namespace bulker_test // Confirm route entry is not pending removal ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); } + + TEST_F(BulkerTest, NeighborBulker) + { + // Create bulker + EntityBulker<sai_neighbor_api_t> gNeighBulker(sai_neighbor_api, 1000); + deque<sai_status_t> object_statuses; + + // Check max bulk size + ASSERT_EQ(gNeighBulker.max_bulk_size, 1000); + + // Create a dummy neighbor entry + sai_neighbor_entry_t neighbor_entry_remove; + neighbor_entry_remove.ip_address.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + neighbor_entry_remove.ip_address.addr.ip4 = 0x10000001; + neighbor_entry_remove.rif_id = 0x0; + neighbor_entry_remove.switch_id = 0x0; + + // Put neighbor entry into remove + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry_remove); + + // Confirm neighbor entry is pending removal + ASSERT_TRUE(gNeighBulker.bulk_entry_pending_removal(neighbor_entry_remove)); + } } diff --git a/tests/mock_tests/mock_orch_test.h
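The NeighborBulker test above leans on EntityBulker's queue-then-flush contract: remove_entry only records intent against the queued statuses, and bulk_entry_pending_removal inspects the queue before anything reaches the SAI API. A toy model of just that behaviour (not the real bulker signatures):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of the EntityBulker behaviour the test relies on:
// remove_entry() only queues; nothing is executed until flush().
template <typename Entry>
class ToyBulker
{
public:
    void remove_entry(const Entry &e) { pending_removal_.push_back(e); }
    bool bulk_entry_pending_removal(const Entry &e) const
    {
        for (const auto &p : pending_removal_)
            if (p == e) return true;
        return false;
    }
    std::size_t flush() { std::size_t n = pending_removal_.size(); pending_removal_.clear(); return n; }
private:
    std::vector<Entry> pending_removal_;
};

int main()
{
    ToyBulker<int> bulker;
    bulker.remove_entry(42);
    assert(bulker.bulk_entry_pending_removal(42)); // queued, not yet executed
    assert(bulker.flush() == 1);                   // one bulk call covers all queued entries
    return 0;
}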
b/tests/mock_tests/mock_orch_test.h index f0e022a7bc..fe6d3a0e07 100644 --- a/tests/mock_tests/mock_orch_test.h +++ b/tests/mock_tests/mock_orch_test.h @@ -19,16 +19,24 @@ namespace mock_orch_test static const string PEER_IPV4_ADDRESS = "1.1.1.1"; static const string ACTIVE_INTERFACE = "Ethernet4"; static const string STANDBY_INTERFACE = "Ethernet8"; + static const string ETHERNET0 = "Ethernet0"; + static const string ETHERNET4 = "Ethernet4"; + static const string ETHERNET8 = "Ethernet8"; + static const string ETHERNET12 = "Ethernet12"; static const string ACTIVE_STATE = "active"; static const string STANDBY_STATE = "standby"; static const string STATE = "state"; static const string VLAN_1000 = "Vlan1000"; static const string VLAN_2000 = "Vlan2000"; + static const string VLAN_3000 = "Vlan3000"; + static const string VLAN_4000 = "Vlan4000"; static const string SERVER_IP1 = "192.168.0.2"; static const string SERVER_IP2 = "192.168.0.3"; static const string MAC1 = "62:f9:65:10:2f:01"; static const string MAC2 = "62:f9:65:10:2f:02"; static const string MAC3 = "62:f9:65:10:2f:03"; + static const string MAC4 = "62:f9:65:10:2f:04"; + static const string MAC5 = "62:f9:65:10:2f:05"; class MockOrchTest: public ::testing::Test { diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h index 7819b5b126..58e3c9da23 100644 --- a/tests/mock_tests/mock_sai_api.h +++ b/tests/mock_tests/mock_sai_api.h @@ -24,8 +24,12 @@ EXTERN_MOCK_FNS #define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry +#define CREATE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses +#define REMOVE_BULK_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ sai_bulk_op_error_mode_t mode, _In_ sai_status_t *object_statuses #define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list #define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define CREATE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, attr_count, attr_list, mode, object_statuses +#define REMOVE_BULK_ARGS(sai_object_type) object_count, sai_object_type##_entry, mode, object_statuses #define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id #define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list @@ -42,8 +46,8 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the 7. 
Define a method to remove the mock */ #define DEFINE_SAI_API_MOCK(sai_object_type) \ - static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ - static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ + static sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ class mock_sai_##sai_object_type##_api_t \ { \ public: \ @@ -59,20 +63,40 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the [this](REMOVE_PARAMS(sai_object_type)) { \ return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ }); \ + ON_CALL(*this, create_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](CREATE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##_entries) \ + .WillByDefault( \ + [this](REMOVE_BULK_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + }); \ } \ MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ + MOCK_METHOD6(create_##sai_object_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_object_type))); \ + MOCK_METHOD4(remove_##sai_object_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ - inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ + inline sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_object_type##_api_mock() \ + inline sai_status_t mock_create_##sai_object_type##_entries(CREATE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entries(CREATE_BULK_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##_entries(REMOVE_BULK_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entries(REMOVE_BULK_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_object_type##_api_mock() \ { \ mock_sai_##sai_object_type##_api = new NiceMock(); \ \ @@ -82,16 +106,18 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the \ sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ + sai_##sai_object_type##_api->create_##sai_object_type##_entries = mock_create_##sai_object_type##_entries; \ + sai_##sai_object_type##_api->remove_##sai_object_type##_entries = mock_remove_##sai_object_type##_entries; \ } \ - inline void 
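Stripped of the token pasting, the macro above sets up one thing: a NiceMock whose default actions forward to the saved real API table, so a test only overrides the entry points it cares about while everything else passes through. A macro-free miniature (toy types, not the SAI signatures):

#include <gmock/gmock.h>

using ::testing::_;
using ::testing::NiceMock;

struct RealApi
{
    int remove_entries(int count) { return count; } // stands in for the vendor call
};

class MockApi
{
public:
    MOCK_METHOD(int, remove_entries, (int count));
};

int main(int argc, char **argv)
{
    ::testing::InitGoogleMock(&argc, argv);
    RealApi real;
    NiceMock<MockApi> mock;
    // Default action: pass through to the saved "old" API, exactly the role
    // ON_CALL(...).WillByDefault(...) plays inside DEFINE_SAI_API_MOCK.
    ON_CALL(mock, remove_entries(_)).WillByDefault([&](int c) { return real.remove_entries(c); });
    return mock.remove_entries(3) == 3 ? 0 : 1;
}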
remove_sai_##sai_object_type##_api_mock() \ + inline void remove_sai_##sai_object_type##_api_mock() \ { \ sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ delete mock_sai_##sai_object_type##_api; \ } #define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ - static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ - static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ class mock_sai_##sai_api_name##_api_t \ { \ public: \ @@ -111,16 +137,16 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ }; \ - static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ - inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ } \ - inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ } \ - inline void apply_sai_##sai_api_name##_api_mock() \ + inline void apply_sai_##sai_api_name##_api_mock() \ { \ mock_sai_##sai_api_name##_api = new NiceMock(); \ \ @@ -131,7 +157,7 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ } \ - inline void remove_sai_##sai_api_name##_api_mock() \ + inline void remove_sai_##sai_api_name##_api_mock() \ { \ sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ delete mock_sai_##sai_api_name##_api; \ diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index 008b0bd9b5..52aa29d24e 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -5,6 +5,10 @@ #include "orch.h" #undef protected #include "ut_helper.h" +#define private public +#include "neighorch.h" +#include "muxorch.h" +#undef private #include "mock_orchagent_main.h" #include "mock_sai_api.h" #include "mock_orch_test.h" @@ -19,13 +23,21 @@ namespace mux_rollback_test DEFINE_SAI_API_MOCK(route); DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + using ::testing::_; using namespace std; using namespace mock_orch_test; using ::testing::Return; using ::testing::Throw; + using ::testing::DoAll; + using ::testing::SetArrayArgument; static const string TEST_INTERFACE = "Ethernet4"; + sai_bulk_create_neighbor_entry_fn old_create_neighbor_entries; + sai_bulk_remove_neighbor_entry_fn old_remove_neighbor_entries; + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + class MuxRollbackTest : public MockOrchTest { protected: @@ -131,41 +143,57 @@ namespace mux_rollback_test 
INIT_SAI_API_MOCK(acl); INIT_SAI_API_MOCK(next_hop); MockSaiApis(); + old_create_neighbor_entries = gNeighOrch->gNeighBulker.create_entries; + old_remove_neighbor_entries = gNeighOrch->gNeighBulker.remove_entries; + old_create_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.create_entries; + old_remove_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.remove_entries; + gNeighOrch->gNeighBulker.create_entries = mock_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = mock_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = mock_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = mock_remove_route_entries; } void PreTearDown() override { RestoreSaiApis(); + gNeighOrch->gNeighBulker.create_entries = old_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = old_remove_neighbor_entries; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = old_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = old_remove_route_entries; } }; TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); SetAndAssertMuxState(STANDBY_STATE); } @@ -201,7 +229,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) .WillOnce(Throw(runtime_error("Mock runtime error"))); SetMuxStateFromAppDb(ACTIVE_STATE); EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); @@ -210,7 +238,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) + EXPECT_CALL(*mock_sai_route_api, create_route_entries) 
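A note on the template indices in the expectations above: SetArrayArgument<N> writes through the N-th (zero-based) parameter, so object_statuses sits at index 5 in the six-argument bulk create and index 3 in the four-argument bulk remove. A compilable miniature of the remove case, with a toy signature but the real gmock actions:

#include <gmock/gmock.h>
#include <vector>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArrayArgument;

// Bulk-remove shape: object_statuses is parameter index 3 (0-based),
// which is why the tests use SetArrayArgument<3> there.
class MockBulkApi
{
public:
    MOCK_METHOD(int, remove_entries, (unsigned count, const int *entries, int mode, int *object_statuses));
};

int main(int argc, char **argv)
{
    ::testing::InitGoogleMock(&argc, argv);
    MockBulkApi mock;
    std::vector<int> exp_status{-7}; // e.g. an ITEM_NOT_FOUND style status
    EXPECT_CALL(mock, remove_entries(_, _, _, _))
        .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(-7)));

    int entries[1] = {0};
    int statuses[1] = {0};
    int rc = mock.remove_entries(1, entries, 0, statuses);
    return (rc == -7 && statuses[0] == -7) ? 0 : 1;
}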
.WillOnce(Throw(runtime_error("Mock runtime error"))); SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); @@ -218,7 +246,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); SetMuxStateFromAppDb(ACTIVE_STATE); EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); @@ -227,7 +255,7 @@ namespace mux_rollback_test TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) { SetAndAssertMuxState(ACTIVE_STATE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); @@ -249,4 +277,12 @@ namespace mux_rollback_test SetMuxStateFromAppDb(STANDBY_STATE); EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopTableFullRollbackToActive) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); + } } diff --git a/tests/mock_tests/neighorch_ut.cpp b/tests/mock_tests/neighorch_ut.cpp index d82e10d987..13e4ead4b0 100644 --- a/tests/mock_tests/neighorch_ut.cpp +++ b/tests/mock_tests/neighorch_ut.cpp @@ -9,7 +9,6 @@ #include "mock_sai_api.h" #include "mock_orch_test.h" - EXTERN_MOCK_FNS namespace neighorch_test @@ -21,15 +20,18 @@ namespace neighorch_test using ::testing::Throw; static const string TEST_IP = "10.10.10.10"; - static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); + static const string VRF_3000 = "Vrf3000"; + static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); static const NeighborEntry VLAN2000_NEIGH = NeighborEntry(TEST_IP, VLAN_2000); + static const NeighborEntry VLAN3000_NEIGH = NeighborEntry(TEST_IP, VLAN_3000); + static const NeighborEntry VLAN4000_NEIGH = NeighborEntry(TEST_IP, VLAN_4000); - class NeighOrchTest: public MockOrchTest + class NeighOrchTest : public MockOrchTest { protected: void SetAndAssertMuxState(std::string interface, std::string state) { - MuxCable* muxCable = m_MuxOrch->getMuxCable(interface); + MuxCable *muxCable = m_MuxOrch->getMuxCable(interface); muxCable->setState(state); EXPECT_EQ(state, muxCable->getState()); } @@ -46,35 +48,49 @@ namespace neighorch_test void ApplyInitialConfigs() { - Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); - Table decap_tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); - Table decap_term_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TERM_TABLE_NAME); - Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); Table fdb_table = Table(m_app_db.get(), APP_FDB_TABLE_NAME); + Table vrf_table = Table(m_app_db.get(), APP_VRF_TABLE_NAME); auto ports = ut_helper::getInitialSaiPorts(); - port_table.set(ACTIVE_INTERFACE, ports[ACTIVE_INTERFACE]); - 
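The rollback tests above all assert one contract: a SAI failure or exception mid-transition must leave the mux cable in its previous state. Reduced to a skeleton (ToyMuxCable is illustrative, not the real MuxCable API):

#include <cassert>
#include <string>

// Toy model of the rollback contract: if switching state fails part-way,
// the cable reverts to its previous state.
class ToyMuxCable
{
public:
    const std::string &getState() const { return state_; }
    void setState(const std::string &next, bool sai_ok)
    {
        std::string prev = state_;
        state_ = next;
        if (!sai_ok)
        {
            state_ = prev; // rollback on SAI error
        }
    }
private:
    std::string state_ = "standby";
};

int main()
{
    ToyMuxCable cable;
    cable.setState("active", /*sai_ok=*/false);
    assert(cable.getState() == "standby"); // failed transition rolled back
    cable.setState("active", /*sai_ok=*/true);
    assert(cable.getState() == "active");
    return 0;
}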
port_table.set(STANDBY_INTERFACE, ports[STANDBY_INTERFACE]); + port_table.set(ETHERNET0, ports[ETHERNET0]); + port_table.set(ETHERNET4, ports[ETHERNET4]); + port_table.set(ETHERNET8, ports[ETHERNET8]); port_table.set("PortConfigDone", { { "count", to_string(1) } }); port_table.set("PortInitDone", { {} }); + vrf_table.set(VRF_3000, { {"NULL", "NULL"} }); + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, { "mtu", "9100" }, { "mac", "00:aa:bb:cc:dd:ee" } }); - vlan_table.set(VLAN_2000, { { "admin_status", "up"}, + vlan_table.set(VLAN_2000, { { "admin_status", "up" }, { "mtu", "9100" }, { "mac", "aa:11:bb:22:cc:33" } }); + vlan_table.set(VLAN_3000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_table.set(VLAN_4000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + ETHERNET0, + { { "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_2000 + vlan_member_table.getTableNameSeparator() + ETHERNET4, + { { "tagging_mode", "untagged" } }); + vlan_member_table.set( - VLAN_1000 + vlan_member_table.getTableNameSeparator() + ACTIVE_INTERFACE, + VLAN_3000 + vlan_member_table.getTableNameSeparator() + ETHERNET8, { { "tagging_mode", "untagged" } }); vlan_member_table.set( - VLAN_2000 + vlan_member_table.getTableNameSeparator() + STANDBY_INTERFACE, + VLAN_4000 + vlan_member_table.getTableNameSeparator() + ETHERNET12, { { "tagging_mode", "untagged" } }); intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, @@ -85,6 +101,16 @@ namespace neighorch_test { "proxy_arp", "enabled" }, { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set(VLAN_3000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_4000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set( VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/24", { { "scope", "global" }, @@ -96,65 +122,56 @@ namespace neighorch_test { "scope", "global" }, { "family", "IPv4" }, }); - decap_term_table.set( - MUX_TUNNEL + neigh_table.getTableNameSeparator() + "2.2.2.2", { { "src_ip", "1.1.1.1" }, - { "term_type", "P2P" } }); - - decap_tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, - { "src_ip", "1.1.1.1" }, - { "ecn_mode", "copy_from_outer" }, - { "encap_ecn_mode", "standard" }, - { "ttl_mode", "pipe" }, - { "tunnel_type", "IPINIP" } }); - - peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); - - mux_cable_table.set(ACTIVE_INTERFACE, { { "server_ipv4", SERVER_IP1 + "/32" }, - { "server_ipv6", "a::a/128" }, - { "state", "auto" } }); + intf_table.set( + VLAN_3000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); - mux_cable_table.set(STANDBY_INTERFACE, { { "server_ipv4", SERVER_IP2+ "/32" }, - { "server_ipv6", "a::b/128" }, - { "state", "auto" } }); + intf_table.set( + VLAN_4000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); gPortsOrch->addExistingData(&port_table); gPortsOrch->addExistingData(&vlan_table); gPortsOrch->addExistingData(&vlan_member_table); static_cast(gPortsOrch)->doTask(); + gVrfOrch->addExistingData(&vrf_table); + static_cast(gVrfOrch)->doTask(); + 
gIntfsOrch->addExistingData(&intf_table); static_cast(gIntfsOrch)->doTask(); - m_TunnelDecapOrch->addExistingData(&decap_tunnel_table); - m_TunnelDecapOrch->addExistingData(&decap_term_table); - static_cast(m_TunnelDecapOrch)->doTask(); - - m_MuxOrch->addExistingData(&peer_switch_table); - static_cast(m_MuxOrch)->doTask(); - - m_MuxOrch->addExistingData(&mux_cable_table); - static_cast(m_MuxOrch)->doTask(); - fdb_table.set( VLAN_1000 + fdb_table.getTableNameSeparator() + MAC1, { { "type", "dynamic" }, - { "port", ACTIVE_INTERFACE } }); + { "port", ETHERNET0 } }); fdb_table.set( VLAN_2000 + fdb_table.getTableNameSeparator() + MAC2, { { "type", "dynamic" }, - { "port", STANDBY_INTERFACE} }); + { "port", ETHERNET4 } }); fdb_table.set( VLAN_1000 + fdb_table.getTableNameSeparator() + MAC3, { { "type", "dynamic" }, - { "port", ACTIVE_INTERFACE} }); + { "port", ETHERNET0 } }); + + fdb_table.set( + VLAN_3000 + fdb_table.getTableNameSeparator() + MAC4, + { { "type", "dynamic" }, + { "port", ETHERNET8 } }); + + fdb_table.set( + VLAN_4000 + fdb_table.getTableNameSeparator() + MAC5, + { { "type", "dynamic" }, + { "port", ETHERNET12 } }); gFdbOrch->addExistingData(&fdb_table); static_cast(gFdbOrch)->doTask(); - - SetAndAssertMuxState(ACTIVE_INTERFACE, ACTIVE_STATE); - SetAndAssertMuxState(STANDBY_INTERFACE, STANDBY_STATE); } void PostSetUp() override @@ -169,18 +186,19 @@ namespace neighorch_test } }; - TEST_F(NeighOrchTest, MultiVlanIpLearning) + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighbor) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_1000, TEST_IP, MAC1); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_2000, TEST_IP, MAC2); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 0); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 1); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); LearnNeighbor(VLAN_1000, TEST_IP, MAC3); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); @@ -201,4 +219,50 @@ namespace neighorch_test ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 0); } + + TEST_F(NeighOrchTest, MultiVlanDifferentVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, MultiVlanSameVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_4000, TEST_IP, MAC5); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 0); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN4000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, 
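The duplicate-neighbor tests above pin down a VRF-aware rule: a duplicate IP learned on a second interface displaces the existing neighbor only when both interfaces resolve to the same VRF; across different VRFs, the two entries coexist. As a self-checking table, with the VLAN-to-VRF mapping mirroring the fixture above:

#include <cassert>
#include <map>
#include <string>

// Illustrative decision table for duplicate IP learning, matching what the
// tests exercise: replace the existing neighbor only within the same VRF.
int main()
{
    std::map<std::string, std::string> vrf_of = {
        {"Vlan1000", "default"}, {"Vlan2000", "default"},
        {"Vlan3000", "Vrf3000"}, {"Vlan4000", "Vrf3000"},
    };
    auto shouldReplace = [&](const std::string &existing, const std::string &incoming) {
        return vrf_of[existing] == vrf_of[incoming];
    };
    assert(shouldReplace("Vlan1000", "Vlan2000"));  // same VRF: move the neighbor
    assert(!shouldReplace("Vlan1000", "Vlan3000")); // different VRFs: keep both
    assert(shouldReplace("Vlan3000", "Vlan4000"));  // same non-default VRF: move
    return 0;
}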
MultiVlanDuplicateNeighborMissingExistingVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_1000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighborMissingNewVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_2000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } } diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index afa26dc439..22f8632af1 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -92,6 +92,12 @@ namespace portsorch_test uint32_t set_pt_interface_id_failures; uint32_t set_pt_timestamp_template_failures; uint32_t set_port_tam_failures; + bool set_link_event_damping_success = true; + uint32_t _sai_set_link_event_damping_algorithm_count; + uint32_t _sai_set_link_event_damping_config_count; + int32_t _sai_link_event_damping_algorithm = 0; + sai_redis_link_event_damping_algo_aied_config_t _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + sai_status_t _ut_stub_sai_set_port_attribute( _In_ sai_object_id_t port_id, _In_ const sai_attribute_t *attr) @@ -148,6 +154,26 @@ namespace portsorch_test return SAI_STATUS_INVALID_ATTR_VALUE_0; } } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM) + { + _sai_set_link_event_damping_algorithm_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_algorithm = attr[0].value.s32; + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG) + { + _sai_set_link_event_damping_config_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_config = *(reinterpret_cast(attr[0].value.ptr)); + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } return pold_sai_port_api->set_port_attribute(port_id, attr); } @@ -1671,6 +1697,314 @@ namespace portsorch_test _unhook_sai_bridge_api(); } + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmSuccess) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // verify SAI call was made and set algorithm successfully + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + 
ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmFailure) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + _sai_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that SAI call was made, algorithm not set + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, NotSupportedLinkEventDampingAlgorithm) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "test_algo"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that no SAI call was made + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingFullConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", 
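These damping tests depend on the file's _hook_sai_port_api idiom: swap the SAI set-attribute function pointer for a stub that counts and records writes for the attributes under test, delegating everything else to the saved original. The idiom reduced to one pointer:

#include <cassert>

// Reduced model of the hook pattern: save the real function pointer,
// install a stub, restore it afterwards.
using set_attr_fn = int (*)(int attr_id);

static set_attr_fn real_set_attr = nullptr;
static int hooked_calls = 0;

static int real_impl(int) { return 0; }
static int stub_impl(int attr_id)
{
    ++hooked_calls;                // count interceptions, like the UT counters
    if (attr_id == 42) return 0;   // handle the attribute under test
    return real_set_attr(attr_id); // everything else passes through
}

int main()
{
    set_attr_fn api = real_impl;
    real_set_attr = api;  // _hook: remember the original
    api = stub_impl;      // ...and install the stub
    api(42);
    api = real_set_attr;  // _unhook: restore
    assert(hooked_calls == 1);
    return 0;
}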
"SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 64000); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 45000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 1650); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1500); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 1000); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingPartialConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"decay_half_life", "30000"}, + {"reuse_threshold", "1200"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 30000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1200); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingConfigFailure) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + 
static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that config is not set + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 0); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortSupportedFecModes) { _hook_sai_port_api(); diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 50aae599bf..0cdda7812d 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -1167,6 +1167,7 @@ namespace qosorch_test static_cast(gQosOrch)->doTask(); // Check DSCP_TO_TC_MAP|AZURE is applied to switch ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); // Remove global DSCP_TO_TC_MAP entries.push_back({"global", "DEL", {}}); @@ -1189,7 +1190,37 @@ namespace qosorch_test // Check DSCP_TO_TC_MAP|AZURE is removed, and the switch_level dscp_to_tc_map is set to NULL ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + + // Run the test in reverse order + entries.push_back({"global", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Try draining PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Try draining DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); } TEST_F(QosOrchTest, QosOrchTestRetryFirstItem) diff --git a/tests/test_fabric_capacity.py b/tests/test_fabric_capacity.py index 91bb1b5e94..a796e9f6bf 100644 --- a/tests/test_fabric_capacity.py +++ b/tests/test_fabric_capacity.py @@ -22,6 +22,11 @@ def test_voq_switch_fabric_capacity(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + # get state_db infor sdb = dvs.get_state_db() # There are 16 fabric 
ports in the test environment. @@ -30,8 +35,6 @@ def test_voq_switch_fabric_capacity(self, vst): cdb_port = "Fabric"+str(portNum) sdb_port = "PORT"+str(portNum) - max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) - # setup test environment sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) diff --git a/tests/test_fabric_port.py b/tests/test_fabric_port.py index a7ad9958b0..dbdd235605 100644 --- a/tests/test_fabric_port.py +++ b/tests/test_fabric_port.py @@ -21,15 +21,22 @@ def test_voq_switch_fabric_link(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": - # get config_db information + # get app_db/config_db information cdb = dvs.get_config_db() + adb = dvs.get_app_db() + + # check if the fabric monitor toggle is working + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}) + + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}) # set config_db to isolateStatus: True cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) # check if appl_db value changes to isolateStatus: True - adb = dvs.get_app_db() adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "True"}) # cleanup diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py index d1b57a019f..9743a4b702 100644 --- a/tests/test_fabric_port_isolation.py +++ b/tests/test_fabric_port_isolation.py @@ -21,6 +21,11 @@ def test_voq_switch_fabric_link(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + # get state_db info sdb = dvs.get_state_db() # key @@ -30,7 +35,6 @@ def test_voq_switch_fabric_link(self, vst): port = "PORT"+str(portNum) # wait for the link monitoring algorithm to skip initial pollings sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "TEST"}) - max_poll = PollingConfig(polling_interval=60, timeout=1200, strict=True) if sdb.get_entry("FABRIC_PORT_TABLE", port)['STATUS'] == 'up': try: # clean up the system for the testing port. @@ -46,6 +50,18 @@ def test_voq_switch_fabric_link(self, vst): # clear the testing errors and wait for the link to get unisolated. sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + # inject testing errors and wait for the link to get isolated again. 
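The PollingConfig(polling_interval=60, timeout=600, strict=True) used throughout these fabric tests is a bounded re-check loop; strict=True means a miss at the deadline fails the test instead of returning quietly. The same contract sketched in C++ for clarity (semantics inferred from how the tests use it, not from dvs_common):

#include <chrono>
#include <functional>
#include <thread>

// Re-check a predicate every interval until it holds or the timeout expires.
static bool waitFor(std::function<bool()> pred,
                    std::chrono::seconds interval,
                    std::chrono::seconds timeout)
{
    auto deadline = std::chrono::steady_clock::now() + timeout;
    while (std::chrono::steady_clock::now() < deadline)
    {
        if (pred()) return true;
        std::this_thread::sleep_for(interval);
    }
    return pred(); // one final re-check; a miss here mirrors strict=True failing
}

int main()
{
    int polls = 0;
    bool ok = waitFor([&] { return ++polls >= 3; },
                      std::chrono::seconds(0), std::chrono::seconds(1));
    return ok ? 0 : 1;
}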
+ sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + # now test force unisolate this link + configKey = "Fabric"+str(portNum) + curForceStatus = int( config_db.get_entry( "FABRIC_PORT", configKey)['forceUnisolateStatus'] ) + curForceStatus += 1 + config_db.update_entry("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}) + config_db.wait_for_field_match("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}, + polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) finally: # cleanup sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) diff --git a/tests/test_fabric_rate.py b/tests/test_fabric_rate.py index 59e5303de3..1885aca2a9 100644 --- a/tests/test_fabric_rate.py +++ b/tests/test_fabric_rate.py @@ -22,6 +22,10 @@ def test_voq_switch_fabric_rate(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) # get state_db infor sdb = dvs.get_state_db() @@ -31,7 +35,6 @@ def test_voq_switch_fabric_rate(self, vst): portNum = random.randint(1, 16) sdb_port = "PORT"+str(portNum) - max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) tx_rate = sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['OLD_TX_DATA'] sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) sdb.wait_for_field_negative_match("FABRIC_PORT_TABLE", sdb_port, {'OLD_TX_DATA': tx_rate}, polling_config=max_poll) diff --git a/tests/test_fabric_switch_id.py b/tests/test_fabric_switch_id.py new file mode 100644 index 0000000000..f6f76011d5 --- /dev/null +++ b/tests/test_fabric_switch_id.py @@ -0,0 +1,48 @@ +from dvslib.dvs_common import wait_for_result, PollingConfig +import pytest + +class TestFabricSwitchId(object): + def check_syslog(self, dvs, marker, log): + def do_check_syslog(): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" %(marker, log)]) + return (int(out.strip()) >= 1, None) + max_poll = PollingConfig(polling_interval=5, timeout=600, strict=True) + wait_for_result(do_check_syslog, polling_config=max_poll) + + def test_invalid_fabric_switch_id(self, vst): + # Find supervisor dvs. + dvs = None + config_db = None + for name in vst.dvss.keys(): + dvs = vst.dvss[name] + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + break + assert dvs and config_db + + # Verify orchagent's handling of invalid fabric switch_id in following cases: + # - Invalid fabric switch_id, e.g, -1, is set. + # - fabric switch_id is missing in ConfigDb. 
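Worth noting from the isolation test above: forceUnisolateStatus behaves as a counter, not a flag. The test reads the configured value and writes value+1, and any observed change requests one un-isolation; this reading is inferred from the test's read-increment-write sequence. In miniature:

#include <cassert>
#include <cstdint>

// Sketch of the inferred forceUnisolateStatus contract: a monotonically
// bumped counter where each change requests a single un-isolation.
int main()
{
    uint32_t last_seen = 3, configured = 3;
    bool unisolate_requested = false;

    configured += 1; // the test bumps the configured value by one
    if (configured != last_seen)
    {
        unisolate_requested = true;
        last_seen = configured;
    }
    assert(unisolate_requested);
    return 0;
}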
+ for invalid_switch_id in (-1, None): + print(f"Test invalid switch id {invalid_switch_id}") + if invalid_switch_id is None: + config_db.delete_field("DEVICE_METADATA", "localhost", "switch_id") + expected_log = "Fabric switch id is not configured" + else: + config_db.set_field("DEVICE_METADATA", "localhost", "switch_id", str(invalid_switch_id)) + expected_log = f"Invalid fabric switch id {invalid_switch_id} configured" + + # Restart orchagent and verify orchagent behavior by checking syslog. + dvs.stop_swss() + marker = dvs.add_log_marker() + dvs.start_swss() + self.check_syslog(dvs, marker, expected_log) + + +# Add dummy always-pass test at end as workaround +# for the issue where a Flaky failure on the final test invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_mux.py b/tests/test_mux.py index 9405312a5a..fce1b4f37c 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -101,6 +101,8 @@ class TestMuxTunnelBase(): DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + BULK_NEIGHBOR_COUNT = 254 + def check_syslog(self, dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) @@ -337,8 +339,66 @@ def del_route(self, dvs, route): ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) ps._del(route) + def wait_for_mux_state(self, dvs, interface, expected_state): + """ + Waits until the state change completes - the expected state is in app_db + """ + + apdb = dvs.get_app_db() + expected_field = {"state": expected_state} + apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field) + + def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route): + dvs.runcmd("ip neigh flush all") + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11") + self.set_mux_state(appdb, "Ethernet0", "active") + + class neighbor_info: + ipv4_key = "" + ipv6_key = "" + ipv4 = "" + ipv6 = "" + + def __init__(self, i): + self.ipv4 = "192.168.1." 
diff --git a/tests/test_mux.py b/tests/test_mux.py
index 9405312a5a..fce1b4f37c 100644
--- a/tests/test_mux.py
+++ b/tests/test_mux.py
@@ -101,6 +101,8 @@ class TestMuxTunnelBase():
     DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)}
     TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)}
 
+    BULK_NEIGHBOR_COUNT = 254
+
     def check_syslog(self, dvs, marker, err_log, expected_cnt):
         (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)])
         assert num.strip() >= str(expected_cnt)
@@ -337,8 +339,66 @@ def del_route(self, dvs, route):
         ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE)
         ps._del(route)
 
+    def wait_for_mux_state(self, dvs, interface, expected_state):
+        """
+        Waits until the state change completes, i.e. the expected state is in state_db.
+        """
+        apdb = dvs.get_app_db()
+        expected_field = {"state": expected_state}
+        apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field)
+
+    def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route):
+        dvs.runcmd("ip neigh flush all")
+        self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11")
+        self.set_mux_state(appdb, "Ethernet0", "active")
+
+        class neighbor_info:
+            ipv4_key = ""
+            ipv6_key = ""
+            ipv4 = ""
+            ipv6 = ""
+
+            def __init__(self, i):
+                self.ipv4 = "192.168.1." + str(i)
+                self.ipv6 = "fc02:1001::" + str(i)
+
+        neighbor_list = [neighbor_info(i) for i in range(100, self.BULK_NEIGHBOR_COUNT)]
+        for neigh_info in neighbor_list:
+            self.add_neighbor(dvs, neigh_info.ipv4, "00:00:00:00:11:11")
+            self.add_neighbor(dvs, neigh_info.ipv6, "00:00:00:00:11:11")
+            neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4)
+            neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6)
+
+        try:
+            self.set_mux_state(appdb, "Ethernet0", "standby")
+            self.wait_for_mux_state(dvs, "Ethernet0", "standby")
+
+            for neigh_info in neighbor_list:
+                asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv4_key)
+                asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv6_key)
+                dvs_route.check_asicdb_route_entries(
+                    [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK]
+                )
+
+            self.set_mux_state(appdb, "Ethernet0", "active")
+            self.wait_for_mux_state(dvs, "Ethernet0", "active")
+
+            for neigh_info in neighbor_list:
+                dvs_route.check_asicdb_deleted_route_entries(
+                    [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK]
+                )
+                neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4)
+                neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6)
+
+        finally:
+            for neigh_info in neighbor_list:
+                self.del_neighbor(dvs, neigh_info.ipv4)
+                self.del_neighbor(dvs, neigh_info.ipv6)
+
     def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route):
+        self.bulk_neighbor_test(confdb, appdb, asicdb, dvs, dvs_route)
         self.set_mux_state(appdb, "Ethernet0", "active")
         self.set_mux_state(appdb, "Ethernet4", "standby")
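Style note, not part of the patch: the inner neighbor_info holder mixes class-level defaults with instance attributes; a dataclass would make the intent explicit. A sketch under that assumption (NeighborInfo is hypothetical, not what the patch uses):

    from dataclasses import dataclass

    @dataclass
    class NeighborInfo:
        """IPv4/IPv6 addresses for one bulk-test neighbor, plus their ASIC_DB keys."""
        index: int
        ipv4_key: str = ""
        ipv6_key: str = ""

        @property
        def ipv4(self) -> str:
            return "192.168.1." + str(self.index)

        @property
        def ipv6(self) -> str:
            return "fc02:1001::" + str(self.index)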
diff --git a/tests/test_port.py b/tests/test_port.py
index d7bf62d2d7..feccb6917a 100644
--- a/tests/test_port.py
+++ b/tests/test_port.py
@@ -432,6 +432,54 @@ def test_PortPathTracing(self, dvs, testlog):
         for key, queue in buffer_queues.items():
             dvs.get_config_db().update_entry("BUFFER_QUEUE", key, queue)
 
+    def test_PortLinkEventDamping(self, dvs, testlog):
+        cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0)
+        pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0)
+
+        cfg_tbl = swsscommon.Table(cdb, "PORT")
+        app_tbl = swsscommon.Table(pdb, "PORT_TABLE")
+        port_name = "Ethernet0"
+
+        # Set link event damping.
+        fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "aied"),
+                                          ("max_suppress_time", "54000"),
+                                          ("decay_half_life", "45000"),
+                                          ("suppress_threshold", "1650"),
+                                          ("reuse_threshold", "1500"),
+                                          ("flap_penalty", "1000")
+                                          ])
+        cfg_tbl.set(port_name, fvs)
+        time.sleep(1)
+
+        # Check the application database.
+        (status, fvs) = app_tbl.get(port_name)
+        assert status == True
+        for fv in fvs:
+            if fv[0] == "link_event_damping_algorithm":
+                assert fv[1] == "aied"
+            elif fv[0] == "max_suppress_time":
+                assert fv[1] == "54000"
+            elif fv[0] == "decay_half_life":
+                assert fv[1] == "45000"
+            elif fv[0] == "suppress_threshold":
+                assert fv[1] == "1650"
+            elif fv[0] == "reuse_threshold":
+                assert fv[1] == "1500"
+            elif fv[0] == "flap_penalty":
+                assert fv[1] == "1000"
+
+        # Disable link event damping.
+        fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "disabled")])
+        cfg_tbl.set(port_name, fvs)
+        time.sleep(1)
+
+        # Check the application database.
+        (status, fvs) = app_tbl.get(port_name)
+        assert status == True
+        for fv in fvs:
+            if fv[0] == "link_event_damping_algorithm":
+                assert fv[1] == "disabled"
+
 
 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
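Reviewer note, not part of the patch: the if/elif verification chain above could be table-driven, which also catches a field that is silently absent. A sketch of that hypothetical alternative, assuming the same swsscommon.Table handle the test already builds:

    def check_app_db_fields(app_tbl, port_name, expected):
        """Compare selected APPL_DB port fields against an expected dict."""
        status, fvs = app_tbl.get(port_name)
        assert status == True
        actual = dict(fvs)
        for field, value in expected.items():
            assert actual.get(field) == value, \
                "%s: %r != %r" % (field, actual.get(field), value)

    # usage: check_app_db_fields(app_tbl, "Ethernet0",
    #                            {"link_event_damping_algorithm": "aied",
    #                             "max_suppress_time": "54000"})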
"unknown", "System interface oper status is unknown" + + def test_remote_port_down(self, vct): + # test params + local_lc_switch_id = '0' + remote_lc_switch_id = '2' + test_system_port = "lc1|Asic0|Ethernet4" + test_prefix = "13.13.0.0/16" + inband_port = "Ethernet0" + test_neigh_ip_1 = "10.8.104.10" + test_neigh_dev_1 = "Ethernet4" + test_neigh_mac_1 = "00:01:02:03:04:05" + test_neigh_ip_2 = "10.8.108.10" + test_neigh_dev_2 = "Ethernet8" + test_neigh_mac_2 = "00:01:02:03:04:06" + + local_lc_dvs = self.get_lc_dvs(vct, local_lc_switch_id) + remote_lc_dvs = self.get_lc_dvs(vct, remote_lc_switch_id) + # config inband port + self.config_inbandif_port(vct, inband_port) + + # add 2 neighbors + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_1) + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_2, test_neigh_mac_2, test_neigh_dev_2) + + time.sleep(30) + + # add route of LC1(pretend learnt via bgp) + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route add {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + time.sleep(10) + # verify 2 nexthops are programmed in asic_db + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 2, "ECMP paths not configured" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "down") + time.sleep(10) + + # verify the port oper status is down in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "down", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 1, "Remote port down does not remote ecmp member" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "up") + time.sleep(10) + + # verify the port oper status is up in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "up", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs,test_prefix) + assert paths == 2, "Remote port up is not added in nexthop group" + + #cleanup + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route del {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + # Cleanup inband if configuration + self.del_inbandif_port(vct, inband_port) + + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vnet.py b/tests/test_vnet.py index c28d7cf320..be08a52c69 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -1,4 +1,5 @@ import time +import 
diff --git a/tests/test_vnet.py b/tests/test_vnet.py
index c28d7cf320..be08a52c69 100644
--- a/tests/test_vnet.py
+++ b/tests/test_vnet.py
@@ -1,4 +1,5 @@
 import time
+import ipaddress
 import json
 import random
 import time
@@ -541,6 +542,62 @@ def check_syslog(dvs, marker, err_log):
     assert num.strip() == "0"
 
 
+def create_fvs(**kwargs):
+    return swsscommon.FieldValuePairs(list(kwargs.items()))
+
+
+def create_subnet_decap_tunnel(dvs, tunnel_name, **kwargs):
+    """Create a tunnel and verify that all needed entries exist in state DB."""
+    appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
+    statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0)
+    fvs = create_fvs(**kwargs)
+    # create the tunnel entry in the DB
+    ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE")
+    ps.set(tunnel_name, fvs)
+
+    # wait till the config is applied
+    time.sleep(1)
+
+    # validate the tunnel entry in state db
+    tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE")
+
+    tunnels = tunnel_state_table.getKeys()
+    for tunnel in tunnels:
+        status, fvs = tunnel_state_table.get(tunnel)
+        assert status == True
+
+        for field, value in fvs:
+            if field == "tunnel_type":
+                assert value == "IPINIP"
+            elif field == "dscp_mode":
+                assert value == kwargs["dscp_mode"]
+            elif field == "ecn_mode":
+                assert value == kwargs["ecn_mode"]
+            elif field == "ttl_mode":
+                assert value == kwargs["ttl_mode"]
+            elif field == "encap_ecn_mode":
+                assert value == kwargs["encap_ecn_mode"]
+            else:
+                assert False, "Field %s is not tested" % field
+
+
+def delete_subnet_decap_tunnel(dvs, tunnel_name):
+    """Delete the tunnel and check that state DB is cleared."""
+    appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
+    statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0)
+    tunnel_app_table = swsscommon.Table(appdb, "TUNNEL_DECAP_TABLE")
+    tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE")
+
+    ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE")
+    ps._del(tunnel_name)
+
+    # wait till the config is applied
+    time.sleep(1)
+
+    assert len(tunnel_app_table.getKeys()) == 0
+    assert len(tunnel_state_table.getKeys()) == 0
+
+
 loopback_id = 0
 def_vr_id = 0
 switch_mac = None
@@ -577,11 +634,27 @@ class VnetVxlanVrfTunnel(object):
     ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION"
     APP_VNET_MONITOR = "VNET_MONITOR_TABLE"
 
+    ecn_modes_map = {
+        "standard": "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD",
+        "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER"
+    }
+
+    dscp_modes_map = {
+        "pipe": "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL",
+        "uniform": "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL"
+    }
+
+    ttl_modes_map = {
+        "pipe": "SAI_TUNNEL_TTL_MODE_PIPE_MODEL",
+        "uniform": "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL"
+    }
+
     def __init__(self):
         self.tunnel_map_ids = set()
         self.tunnel_map_entry_ids = set()
         self.tunnel_ids = set()
         self.tunnel_term_ids = set()
+        self.ipinip_tunnel_term_ids = {}
         self.tunnel_map_map = {}
         self.tunnel = {}
         self.vnet_vr_ids = set()
@@ -611,6 +684,61 @@ def fetch_exist_entries(self, dvs):
         if switch_mac is None:
             switch_mac = get_switch_mac(dvs)
 
+    def check_ipinip_tunnel(self, dvs, tunnel_name, dscp_mode, ecn_mode, ttl_mode):
+        asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
+
+        tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids)
+        tunnel_attrs = {
+            'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_IPINIP',
+            'SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE': self.dscp_modes_map[dscp_mode],
+            'SAI_TUNNEL_ATTR_ENCAP_ECN_MODE': self.ecn_modes_map[ecn_mode],
+            'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': self.ttl_modes_map[ttl_mode]
+        }
+        check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, tunnel_attrs)
+
+        self.tunnel_ids.add(tunnel_id)
+        self.tunnel[tunnel_name] = tunnel_id
+
+    def check_del_ipinip_tunnel(self, dvs, tunnel_name):
+        asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
+
+        tunnel_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1)[0]
+        check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id)
+        self.tunnel_ids.remove(tunnel_id)
+        assert tunnel_id == self.tunnel[tunnel_name]
+        self.tunnel.pop(tunnel_name)
+
+    def check_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip):
+        asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
+
+        dst_ip = ipaddress.ip_network(dst_ip)
+        src_ip = ipaddress.ip_network(src_ip)
+        tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids)
+        tunnel_term_attrs = {
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP',
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_IPINIP',
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': str(dst_ip.network_address),
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK': str(dst_ip.netmask),
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP': str(src_ip.network_address),
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK': str(src_ip.netmask),
+            'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': self.tunnel[tunnel_name]
+        }
+        check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, tunnel_term_attrs)
+
+        self.tunnel_term_ids.add(tunnel_term_id)
+        self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] = tunnel_term_id
+
+    def check_del_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip):
+        asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
+
+        dst_ip = ipaddress.ip_network(dst_ip)
+        src_ip = ipaddress.ip_network(src_ip)
+        tunnel_term_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids, 1)[0]
+        check_deleted_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id)
+        self.tunnel_term_ids.remove(tunnel_term_id)
+        assert self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] == tunnel_term_id
+        self.ipinip_tunnel_term_ids.pop((tunnel_name, src_ip, dst_ip))
+
     def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip):
         asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
         global loopback_id, def_vr_id
@@ -1099,6 +1227,30 @@ def check_custom_monitor_deleted(self, dvs, prefix, endpoint):
 
 class TestVnetOrch(object):
 
+    CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP"
+
+    @pytest.fixture
+    def setup_subnet_decap(self, dvs):
+
+        def _apply_subnet_decap_config(subnet_decap_config):
+            """Apply subnet decap config to CONFIG_DB."""
+            subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME)
+            fvs = create_fvs(**subnet_decap_config)
+            subnet_decap_tbl.set("AZURE", fvs)
+
+        def _cleanup_subnet_decap_config():
+            """Clean up subnet decap config in CONFIG_DB."""
+            subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME)
+            for key in subnet_decap_tbl.getKeys():
+                subnet_decap_tbl._del(key)
+
+        configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)
+        _cleanup_subnet_decap_config()
+
+        yield _apply_subnet_decap_config
+
+        _cleanup_subnet_decap_config()
+
     def get_vnet_obj(self):
         return VnetVxlanVrfTunnel()
 
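Reviewer note, not part of the patch: setup_subnet_decap is a factory fixture, it yields a function so each test supplies its own config while the SUBNET_DECAP table is wiped both before and after the test. A hypothetical usage sketch (test name invented for illustration):

    def test_subnet_decap_fixture_usage(self, dvs, setup_subnet_decap):
        # The fixture has already cleaned CONFIG_DB; apply this test's config.
        setup_subnet_decap({
            "status": "enable",
            "src_ip": "10.10.10.0/24",
            "src_ip_v6": "20c1:ba8::/64",
        })
        # ... test body; the fixture cleans up again on teardown.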
@@ -3524,6 +3676,178 @@ def test_vnet_orch_25(self, dvs, testlog):
         vnet_obj.check_del_vnet_entry(dvs, 'Vnet25')
         delete_vxlan_tunnel(dvs, tunnel_name)
 
+    '''
+    Test 26 - Test for vnet tunnel routes with an ECMP nexthop group with subnet decap enabled
+    '''
+    def test_vnet_orch_26(self, dvs, setup_subnet_decap):
+        # apply subnet decap config
+        subnet_decap_config = {
+            "status": "enable",
+            "src_ip": "10.10.10.0/24",
+            "src_ip_v6": "20c1:ba8::/64"
+        }
+        setup_subnet_decap(subnet_decap_config)
+
+        vnet_obj = self.get_vnet_obj()
+        vnet_obj.fetch_exist_entries(dvs)
+
+        # Add the subnet decap tunnel
+        create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET", tunnel_type="IPINIP",
+                                   dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe")
+        vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET", "uniform", "standard", "pipe")
+
+        vnet_obj.fetch_exist_entries(dvs)
+        tunnel_name = 'tunnel_26'
+        create_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26')
+        create_vnet_entry(dvs, 'Vnet26', tunnel_name, '10026', "", advertise_prefix=True)
+
+        vnet_obj.check_vnet_entry(dvs, 'Vnet26')
+        vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet26', '10026')
+        vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26')
+
+        vnet_obj.fetch_exist_entries(dvs)
+        create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26', '26.0.0.1,26.0.0.2,26.0.0.3', ep_monitor='26.1.0.1,26.1.0.2,26.1.0.3', profile="test_profile")
+
+        with pytest.raises(AssertionError):
+            vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24")
+
+        # the default bfd status is down; the route should not be programmed in this status
+        vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"])
+        check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", [])
+        check_remove_routes_advertisement(dvs, "100.100.1.1/32")
+
+        # The route should be properly configured when all bfd session states go up
+        update_bfd_session_state(dvs, '26.1.0.1', 'Up')
+
+        time.sleep(2)
+        # the subnet decap term should be created as soon as one bfd session state goes up
+        vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24")
+
+        update_bfd_session_state(dvs, '26.1.0.2', 'Up')
+        update_bfd_session_state(dvs, '26.1.0.3', 'Up')
+        time.sleep(2)
+        vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet26', ['26.0.0.1', '26.0.0.2', '26.0.0.3'], tunnel_name)
+        check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", ['26.0.0.1', '26.0.0.2', '26.0.0.3'])
+        check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile")
+
+        # Set all endpoints to the down state
+        update_bfd_session_state(dvs, '26.1.0.1', 'Down')
+        update_bfd_session_state(dvs, '26.1.0.2', 'Down')
+        update_bfd_session_state(dvs, '26.1.0.3', 'Down')
+        time.sleep(2)
+
+        # the subnet decap term should be removed once all bfd session states go down
+        vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24")
+
+        # Confirm the tunnel route is updated in ASIC
+        vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"])
+        check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", [])
+        check_remove_routes_advertisement(dvs, "100.100.1.1/32")
+
+        # Remove the tunnel route
+        delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26')
+        vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"])
+        check_remove_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32")
+        check_remove_routes_advertisement(dvs, "100.100.1.1/32")
+
+        # Confirm the BFD sessions are removed
+        check_del_bfd_session(dvs, ['26.1.0.1', '26.1.0.2', '26.1.0.3'])
+
+        delete_vnet_entry(dvs, 'Vnet26')
+        vnet_obj.check_del_vnet_entry(dvs, 'Vnet26')
+        delete_vxlan_tunnel(dvs, tunnel_name)
+
+        # Remove the subnet decap tunnel
+        vnet_obj.fetch_exist_entries(dvs)
+        delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET")
+        vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET")
+
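Reviewer note, not part of the patch: tests 26 and 27 repeat the same drive-all-BFD-sessions-then-settle pattern several times; a tiny helper could tighten this. A sketch assuming the module's existing update_bfd_session_state helper (set_bfd_sessions itself is hypothetical):

    import time

    def set_bfd_sessions(dvs, endpoints, state):
        """Drive every BFD monitor session in `endpoints` to `state`
        ('Up' or 'Down') and give vnetorch a moment to react."""
        for ep in endpoints:
            update_bfd_session_state(dvs, ep, state)  # helper defined earlier in test_vnet.py
        time.sleep(2)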
+
+    '''
+    Test 27 - Test for IPv6 vnet tunnel routes with an ECMP nexthop group with subnet decap enabled
+    '''
+    def test_vnet_orch_27(self, dvs, setup_subnet_decap):
+        # apply subnet decap config
+        subnet_decap_config = {
+            "status": "enable",
+            "src_ip": "10.10.10.0/24",
+            "src_ip_v6": "20c1:ba8::/64"
+        }
+        setup_subnet_decap(subnet_decap_config)
+
+        vnet_obj = self.get_vnet_obj()
+        vnet_obj.fetch_exist_entries(dvs)
+
+        # Add the subnet decap tunnel
+        create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6", tunnel_type="IPINIP",
+                                   dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe")
+        vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6", "uniform", "standard", "pipe")
+
+        vnet_obj.fetch_exist_entries(dvs)
+        tunnel_name = 'tunnel_27'
+        vnet_name = 'Vnet26'
+        create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32')
+        create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "", advertise_prefix=True)
+
+        vnet_obj.check_vnet_entry(dvs, vnet_name)
+        vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010')
+        vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32')
+
+        vnet_obj.fetch_exist_entries(dvs)
+        create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3', profile="test_profile")
+
+        with pytest.raises(AssertionError):
+            vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64")
+
+        # the default bfd status is down; the route should not be programmed in this status
+        vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"])
+        check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", [])
+        check_remove_routes_advertisement(dvs, "fd:10:10::1/128")
+
+        # The route should be properly configured when all bfd session states go up
+        update_bfd_session_state(dvs, 'fd:10:2::2', 'Up')
+
+        time.sleep(2)
+        # the subnet decap term should be created as soon as one bfd session state goes up
+        vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64")
+
+        update_bfd_session_state(dvs, 'fd:10:2::3', 'Up')
+        update_bfd_session_state(dvs, 'fd:10:2::1', 'Up')
+        time.sleep(2)
+        vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name)
+        check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'])
+        check_routes_advertisement(dvs, "fd:10:10::1/128", "test_profile")
+
+        # Set all endpoints to the down state
+        update_bfd_session_state(dvs, 'fd:10:2::1', 'Down')
+        update_bfd_session_state(dvs, 'fd:10:2::2', 'Down')
+        update_bfd_session_state(dvs, 'fd:10:2::3', 'Down')
+        time.sleep(2)
+
+        # the subnet decap term should be removed once all bfd session states go down
+        vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64")
+
+        # Confirm the tunnel route is updated in ASIC
+        vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"])
+        check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", [])
+        check_remove_routes_advertisement(dvs, "fd:10:10::1/128")
+
+        # Remove the tunnel route
+        delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name)
+        vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"])
+        check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128")
+        check_remove_routes_advertisement(dvs, "fd:10:10::1/128")
+
+        # Confirm the BFD sessions are removed
+        check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3'])
+
+        delete_vnet_entry(dvs, vnet_name)
+        vnet_obj.check_del_vnet_entry(dvs, vnet_name)
+        delete_vxlan_tunnel(dvs, tunnel_name)
+
+        # Remove the subnet decap tunnel
+        vnet_obj.fetch_exist_entries(dvs)
+        delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6")
+        vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6")
+
 
 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
 def test_nonflaky_dummy():
diff --git a/tests/virtual_chassis/1/default_config.json b/tests/virtual_chassis/1/default_config.json
index 88769c9ce6..8cea66ee12 100644
--- a/tests/virtual_chassis/1/default_config.json
+++ b/tests/virtual_chassis/1/default_config.json
@@ -15,8 +15,10 @@
     "INTERFACE": {
         "Ethernet0": {},
         "Ethernet4": {},
+        "Ethernet8": {},
         "Ethernet0|10.8.101.1/24": {},
-        "Ethernet4|10.8.104.1/24": {}
+        "Ethernet4|10.8.104.1/24": {},
+        "Ethernet8|10.8.108.1/24": {}
     },
     "PORT": {
         "Ethernet0": {
@@ -24,6 +26,9 @@
         },
         "Ethernet4": {
             "admin_status": "up"
+        },
+        "Ethernet8": {
+            "admin_status": "up"
         }
     },
     "SYSTEM_PORT": {
diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json
index 6f77a1ade2..4160d7dd92 100644
--- a/tests/virtual_chassis/8/default_config.json
+++ b/tests/virtual_chassis/8/default_config.json
@@ -5,6 +5,7 @@
     "chassis_db_address" : "10.8.1.200",
     "inband_address" : "10.8.1.200/24",
     "switch_type": "fabric",
+    "switch_id": "0",
     "sub_role" : "BackEnd",
     "start_chassis_db" : "1",
     "comment" : "default_config for a vs that runs chassis_db"