[QosOrch] The notifications cannot be drained in QosOrch in case the first one needs to retry (sonic-net#2206)

What I did
Bugfix: notifications cannot be drained in QosOrch if the first one needs to be retried.

Why I did it
Bugfix

How I verified it
Mock test and manual test.

Details if related
The root cause is that both doTask and the table handlers took the consumer as their argument.
doTask iterates over the notifications in its main loop and dispatches each one to a table handler. However, because only the consumer was passed, a handler always read the first notification in m_toSync and could not access the notifications that follow it. If the first notification needed a retry and therefore remained in m_toSync, the remaining notifications never got a chance to run. The fix passes the specific notification (KeyOpFieldsValuesTuple) being iterated to each handler, as illustrated in the sketch below.
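Below is a minimal sketch of the before/after pattern; the Consumer, KeyOpFieldsValuesTuple, TaskStatus and handler names are simplified stand-ins rather than the actual swss/QosOrch definitions, and the retry loop is illustrative only.

// Minimal sketch of the before/after handler pattern (simplified stand-ins,
// not the real swss/QosOrch code).
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using FieldValueTuple = std::pair<std::string, std::string>;
using KeyOpFieldsValuesTuple =
    std::tuple<std::string, std::string, std::vector<FieldValueTuple>>;

enum class TaskStatus { Success, NeedRetry };

struct Consumer
{
    std::map<std::string, KeyOpFieldsValuesTuple> m_toSync; // pending notifications
};

// Before: the handler re-read m_toSync.begin(), so no matter which entry
// doTask was iterating, it always processed the first pending one.
TaskStatus handleBefore(Consumer &consumer)
{
    KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second;
    // ... convert fields, program the hardware, etc. ...
    return TaskStatus::NeedRetry; // if this stays in m_toSync, later entries starve
}

// After: doTask passes the specific tuple it is iterating, so a retried
// first entry no longer hides the ones behind it.
TaskStatus handleAfter(Consumer &consumer, KeyOpFieldsValuesTuple &tuple)
{
    // ... convert fields, program the hardware, etc. ...
    return TaskStatus::Success;
}

void doTask(Consumer &consumer)
{
    auto it = consumer.m_toSync.begin();
    while (it != consumer.m_toSync.end())
    {
        TaskStatus status = handleAfter(consumer, it->second);
        if (status == TaskStatus::NeedRetry)
            ++it;                             // keep the entry, move on to the next
        else
            it = consumer.m_toSync.erase(it); // handled, drop it
    }
}

With the first form, a single entry that keeps returning NeedRetry blocks every other pending notification in the same consumer; with the second, doTask keeps advancing and only the failing entry is revisited.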

Signed-off-by: Stephen Sun <stephens@nvidia.com>
stephenxs committed Mar 31, 2022
1 parent 5575935 commit c2de7fc
Showing 3 changed files with 133 additions and 46 deletions.
56 changes: 25 additions & 31 deletions orchagent/qosorch.cpp
@@ -98,13 +98,11 @@ map<string, string> qos_to_ref_table_map = {
#define DSCP_MAX_VAL 63
#define EXP_MAX_VAL 7

task_process_status QosMapHandler::processWorkItem(Consumer& consumer)
task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();

sai_object_id_t sai_object = SAI_NULL_OBJECT_ID;
auto it = consumer.m_toSync.begin();
KeyOpFieldsValuesTuple tuple = it->second;
string qos_object_name = kfvKey(tuple);
string qos_map_type_name = consumer.getTableName();
string op = kfvOp(tuple);
@@ -321,11 +319,11 @@ bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object)
return true;
}

task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer)
task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
DscpToTcMapHandler dscp_tc_handler;
return dscp_tc_handler.processWorkItem(consumer);
return dscp_tc_handler.processWorkItem(consumer, tuple);
}

bool MplsTcToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -376,11 +374,11 @@ sai_object_id_t MplsTcToTcMapHandler::addQosItem(const vector<sai_attribute_t> &
return sai_object;
}

task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer)
task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
MplsTcToTcMapHandler mpls_tc_to_tc_handler;
return mpls_tc_to_tc_handler.processWorkItem(consumer);
return mpls_tc_to_tc_handler.processWorkItem(consumer, tuple);
}

bool Dot1pToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -445,11 +443,11 @@ sai_object_id_t Dot1pToTcMapHandler::addQosItem(const vector<sai_attribute_t> &a
return object_id;
}

task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer)
task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
Dot1pToTcMapHandler dot1p_tc_handler;
return dot1p_tc_handler.processWorkItem(consumer);
return dot1p_tc_handler.processWorkItem(consumer, tuple);
}

bool TcToQueueMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -498,11 +496,11 @@ sai_object_id_t TcToQueueMapHandler::addQosItem(const vector<sai_attribute_t> &a
return sai_object;
}

task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer)
task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
TcToQueueMapHandler tc_queue_handler;
return tc_queue_handler.processWorkItem(consumer);
return tc_queue_handler.processWorkItem(consumer, tuple);
}

void WredMapHandler::freeAttribResources(vector<sai_attribute_t> &attributes)
@@ -719,11 +717,11 @@ bool WredMapHandler::removeQosItem(sai_object_id_t sai_object)
return true;
}

task_process_status QosOrch::handleWredProfileTable(Consumer& consumer)
task_process_status QosOrch::handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
WredMapHandler wred_handler;
return wred_handler.processWorkItem(consumer);
return wred_handler.processWorkItem(consumer, tuple);
}

bool TcToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -772,11 +770,11 @@ sai_object_id_t TcToPgHandler::addQosItem(const vector<sai_attribute_t> &attribu

}

task_process_status QosOrch::handleTcToPgTable(Consumer& consumer)
task_process_status QosOrch::handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
TcToPgHandler tc_to_pg_handler;
return tc_to_pg_handler.processWorkItem(consumer);
return tc_to_pg_handler.processWorkItem(consumer, tuple);
}

bool PfcPrioToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -826,11 +824,11 @@ sai_object_id_t PfcPrioToPgHandler::addQosItem(const vector<sai_attribute_t> &at

}

task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer)
task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
PfcPrioToPgHandler pfc_prio_to_pg_handler;
return pfc_prio_to_pg_handler.processWorkItem(consumer);
return pfc_prio_to_pg_handler.processWorkItem(consumer, tuple);
}

bool PfcToQueueHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -967,11 +965,11 @@ sai_object_id_t DscpToFcMapHandler::addQosItem(const vector<sai_attribute_t> &at
return sai_object;
}

task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer)
task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
DscpToFcMapHandler dscp_fc_handler;
return dscp_fc_handler.processWorkItem(consumer);
return dscp_fc_handler.processWorkItem(consumer, tuple);
}

bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple,
@@ -1058,18 +1056,18 @@ sai_object_id_t ExpToFcMapHandler::addQosItem(const vector<sai_attribute_t> &att
return sai_object;
}

task_process_status QosOrch::handleExpToFcTable(Consumer& consumer)
task_process_status QosOrch::handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
ExpToFcMapHandler exp_fc_handler;
return exp_fc_handler.processWorkItem(consumer);
return exp_fc_handler.processWorkItem(consumer, tuple);
}

task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer)
task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
PfcToQueueHandler pfc_to_queue_handler;
return pfc_to_queue_handler.processWorkItem(consumer);
return pfc_to_queue_handler.processWorkItem(consumer, tuple);
}

QosOrch::QosOrch(DBConnector *db, vector<string> &tableNames) : Orch(db, tableNames)
@@ -1104,14 +1102,13 @@ void QosOrch::initTableHandlers()
m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, &QosOrch::handlePfcToQueueTable));
}

task_process_status QosOrch::handleSchedulerTable(Consumer& consumer)
task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();

sai_status_t sai_status;
sai_object_id_t sai_object = SAI_NULL_OBJECT_ID;

KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second;
string qos_map_type_name = CFG_SCHEDULER_TABLE_NAME;
string qos_object_name = kfvKey(tuple);
string op = kfvOp(tuple);
@@ -1457,11 +1454,9 @@ bool QosOrch::applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_i
return true;
}

task_process_status QosOrch::handleQueueTable(Consumer& consumer)
task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
auto it = consumer.m_toSync.begin();
KeyOpFieldsValuesTuple tuple = it->second;
Port port;
bool result;
string key = kfvKey(tuple);
@@ -1690,11 +1685,10 @@ task_process_status QosOrch::ResolveMapAndApplyToPort(
return task_process_status::task_success;
}

task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)
task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();

KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second;
string key = kfvKey(tuple);
string op = kfvOp(tuple);
vector<string> port_names = tokenize(key, list_item_delimiter);
@@ -1898,7 +1892,7 @@ void QosOrch::doTask(Consumer &consumer)
continue;
}

auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer);
auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer, it->second);
switch(task_status)
{
case task_process_status::task_success :
30 changes: 15 additions & 15 deletions orchagent/qosorch.h
@@ -59,7 +59,7 @@ const string ecn_all = "ecn_all";
class QosMapHandler
{
public:
task_process_status processWorkItem(Consumer& consumer);
task_process_status processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
virtual bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes) = 0;
virtual void freeAttribResources(vector<sai_attribute_t> &attributes);
virtual bool modifyQosItem(sai_object_id_t, vector<sai_attribute_t> &attributes);
@@ -158,25 +158,25 @@ class QosOrch : public Orch
void doTask() override;
virtual void doTask(Consumer& consumer);

typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer);
typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
typedef map<string, qos_table_handler> qos_table_handler_map;
typedef pair<string, qos_table_handler> qos_handler_pair;

void initTableHandlers();

task_process_status handleDscpToTcTable(Consumer& consumer);
task_process_status handleMplsTcToTcTable(Consumer& consumer);
task_process_status handleDot1pToTcTable(Consumer& consumer);
task_process_status handlePfcPrioToPgTable(Consumer& consumer);
task_process_status handlePfcToQueueTable(Consumer& consumer);
task_process_status handlePortQosMapTable(Consumer& consumer);
task_process_status handleTcToPgTable(Consumer& consumer);
task_process_status handleTcToQueueTable(Consumer& consumer);
task_process_status handleSchedulerTable(Consumer& consumer);
task_process_status handleQueueTable(Consumer& consumer);
task_process_status handleWredProfileTable(Consumer& consumer);
task_process_status handleDscpToFcTable(Consumer& consumer);
task_process_status handleExpToFcTable(Consumer& consumer);
task_process_status handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleDot1pToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);

sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id);

93 changes: 93 additions & 0 deletions tests/mock_tests/qosorch_ut.cpp
@@ -786,4 +786,97 @@ namespace qosorch_test
static_cast<Orch *>(gQosOrch)->doTask();
ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id);
}

TEST_F(QosOrchTest, QosOrchTestRetryFirstItem)
{
// There was a bug in QosOrch where the 2nd and subsequent notifications could not be handled if the 1st one needed to be retried.
// This test verifies that the bug has been fixed.
vector<string> ts;
std::deque<KeyOpFieldsValuesTuple> entries;

// Try adding dscp_to_tc_map AZURE.1 and AZURE to PORT_QOS_MAP table
// The object AZURE.1 does not exist, so the first item cannot be handled and remains in m_toSync.
entries.push_back({"Ethernet0", "SET",
{
{"dscp_to_tc_map", "AZURE.1"}
}});
entries.push_back({"Ethernet4", "SET",
{
{"dscp_to_tc_map", "AZURE"}
}});
auto portQosMapConsumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME));
portQosMapConsumer->addToSync(entries);
entries.clear();
static_cast<Orch *>(gQosOrch)->doTask();
// The 2nd notification should be handled. Verify it by checking the reference.
CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet4", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE");
// Make sure there is one item left
portQosMapConsumer->dumpPendingTasks(ts);
ASSERT_EQ(ts[0], "PORT_QOS_MAP|Ethernet0|SET|dscp_to_tc_map:AZURE.1");
ASSERT_EQ(ts.size(), 1);
ts.clear();

// Try adding scheduler.0 and scheduler.2 to QUEUE table
entries.push_back({"Ethernet0|0", "SET",
{
{"scheduler", "scheduler.2"}
}});
entries.push_back({"Ethernet0|1", "SET",
{
{"scheduler", "scheduler.0"}
}});
auto queueConsumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME));
queueConsumer->addToSync(entries);
entries.clear();
static_cast<Orch *>(gQosOrch)->doTask();
// The 2nd notification should be handled. Verify it by checking the reference.
CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|1", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0");
// Make sure there is one item left
queueConsumer->dumpPendingTasks(ts);
ASSERT_EQ(ts[0], "QUEUE|Ethernet0|0|SET|scheduler:scheduler.2");
ASSERT_EQ(ts.size(), 1);
ts.clear();

// Try removing AZURE and adding AZURE.1 to DSCP_TO_TC_MAP table
entries.push_back({"AZURE", "DEL", {{}}});
entries.push_back({"AZURE.1", "SET",
{
{"1", "1"}
}});
auto consumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME));
consumer->addToSync(entries);
entries.clear();
static_cast<Orch *>(gQosOrch)->doTask();
// The 2nd notification should be handled. Verify it by checking the reference.
CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE.1");
// The pending item in PORT_QOS_MAP table should also be handled since the dependency is met
portQosMapConsumer->dumpPendingTasks(ts);
ASSERT_TRUE(ts.empty());
consumer->dumpPendingTasks(ts);
ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL|:");
ASSERT_EQ(ts.size(), 1);
ts.clear();

entries.push_back({"scheduler.0", "DEL", {{}}});
entries.push_back({"scheduler.2", "SET",
{
{"type", "DWRR"},
{"weight", "15"}
}});
consumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME));
consumer->addToSync(entries);
entries.clear();
static_cast<Orch *>(gQosOrch)->doTask();
// We need a second call to "doTask" because the scheduler table is handled after the queue table.
static_cast<Orch *>(gQosOrch)->doTask();
// The 2nd notification should be handled. Verify it by checking the reference.
CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.2");
// The pending item in QUEUE table should also be handled since the dependency is met
queueConsumer->dumpPendingTasks(ts);
ASSERT_TRUE(ts.empty());
consumer->dumpPendingTasks(ts);
ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL|:");
ASSERT_EQ(ts.size(), 1);
ts.clear();
}
}
