diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index c4f5ad1edb..de8e26f519 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -303,6 +303,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 
 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 {
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	unsigned int i = 0, j = 0, qid;
 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
@@ -310,12 +311,12 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
 	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
 
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-	for_each_rss(qid) {
+	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
 		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
 			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
 			     sizeof(uint64_t));
@@ -341,7 +342,7 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
 	i = 0;
 
-	for_each_tss(qid) {
+	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
 		txq = qdev->fp_array[qid].txq;
 
 		OSAL_MEMSET((uint64_t *)(uintptr_t)
@@ -988,7 +989,7 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
 		id = i / RTE_RETA_GROUP_SIZE;
 		pos = i % RTE_RETA_GROUP_SIZE;
-		q = i % QEDE_RSS_COUNT(qdev);
+		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
 	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
@@ -1162,22 +1163,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	/* Check requirements for 100G mode */
-	if (ECORE_IS_CMT(edev)) {
-		if (eth_dev->data->nb_rx_queues < 2 ||
-		    eth_dev->data->nb_tx_queues < 2) {
-			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
-			return -EINVAL;
-		}
-
-		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
-			DP_ERR(edev,
-			       "100G mode needs even no. of RX/TX queues\n");
-			return -EINVAL;
-		}
-	}
-
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
 	 */
@@ -1204,8 +1189,9 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		return -ENOTSUP;
 
 	qede_dealloc_fp_resc(eth_dev);
-	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
-	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+	qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+	qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+
 	if (qede_alloc_fp_resc(qdev))
 		return -ENOMEM;
@@ -1230,7 +1216,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 
 	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
-		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+		QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+	if (ECORE_IS_CMT(edev))
+		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+			qdev->num_rx_queues, qdev->num_tx_queues);
+
 	return 0;
 }
 
@@ -1272,6 +1263,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	else
 		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
 			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+	/* Since CMT mode internally doubles the number of queues */
+	if (ECORE_IS_CMT(edev))
+		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
+
 	dev_info->max_tx_queues = dev_info->max_rx_queues;
 
 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
@@ -1515,18 +1510,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
 	/* Queue stats */
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
-	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
 		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
 			   "Not all the queue stats will be displayed. Set"
 			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
 			   " appropriately and retry.\n");
 
-	for_each_rss(qid) {
+	for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
 		eth_stats->q_ipackets[i] =
 			*(uint64_t *)(
 				((char *)(qdev->fp_array[qid].rxq)) +
@@ -1546,7 +1541,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 			break;
 	}
 
-	for_each_tss(qid) {
+	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
 		txq = qdev->fp_array[qid].txq;
 		eth_stats->q_opackets[j] =
 			*((uint64_t *)(uintptr_t)
@@ -1563,18 +1558,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
 	if (ECORE_IS_BB(&qdev->edev))
 		return RTE_DIM(qede_xstats_strings) +
 		       RTE_DIM(qede_bb_xstats_strings) +
 		       (RTE_DIM(qede_rxq_xstats_strings) *
-			RTE_MIN(QEDE_RSS_COUNT(qdev),
-				RTE_ETHDEV_QUEUE_STAT_CNTRS));
+			QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
 	else
 		return RTE_DIM(qede_xstats_strings) +
 		       RTE_DIM(qede_ah_xstats_strings) +
 		       (RTE_DIM(qede_rxq_xstats_strings) *
-			RTE_MIN(QEDE_RSS_COUNT(qdev),
-				RTE_ETHDEV_QUEUE_STAT_CNTRS));
+			QEDE_RSS_COUNT(dev));
 }
 
 static int
@@ -1615,7 +1610,7 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
 		}
 	}
 
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
 		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
@@ -1673,17 +1668,15 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		}
 	}
 
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-		for_each_rss(qid) {
-			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
-				xstats[stat_idx].value = *(uint64_t *)(
-					((char *)(qdev->fp_array[qid].rxq)) +
-					qede_rxq_xstats_strings[i].offset);
-				xstats[stat_idx].id = stat_idx;
-				stat_idx++;
-			}
+		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+			xstats[stat_idx].value = *(uint64_t *)
+				(((char *)(qdev->fp_array[qid].rxq)) +
+				 qede_rxq_xstats_strings[i].offset);
+			xstats[stat_idx].id = stat_idx;
+			stat_idx++;
 		}
 	}
 
@@ -1937,7 +1930,8 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 		RTE_PTYPE_UNKNOWN
 	};
 
-	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
 		return ptypes;
 
 	return NULL;
@@ -2004,7 +1998,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
 	vport_update_params.vport_id = 0;
 	/* pass the L2 handles instead of qids */
 	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-		idx = i % QEDE_RSS_COUNT(qdev);
+		idx = i % QEDE_RSS_COUNT(eth_dev);
 		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
 	}
 	vport_update_params.rss_params = &rss_params;
@@ -2256,7 +2250,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	qdev->mtu = mtu;
 
 	/* Fix up RX buf size for all queues of the port */
-	for_each_rss(i) {
+	for (i = 0; i < qdev->num_rx_queues; i++) {
 		fp = &qdev->fp_array[i];
 		if (fp->rxq != NULL) {
 			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
@@ -2285,9 +2279,13 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 	/* Reassign back */
-	dev->rx_pkt_burst = qede_recv_pkts;
-	dev->tx_pkt_burst = qede_xmit_pkts;
-
+	if (ECORE_IS_CMT(edev)) {
+		dev->rx_pkt_burst = qede_recv_pkts_cmt;
+		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+	} else {
+		dev->rx_pkt_burst = qede_recv_pkts;
+		dev->tx_pkt_burst = qede_xmit_pkts;
+	}
 	return 0;
 }
 
@@ -2428,10 +2426,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		 pci_addr.bus, pci_addr.devid, pci_addr.function,
 		 eth_dev->data->port_id);
 
-	eth_dev->rx_pkt_burst = qede_recv_pkts;
-	eth_dev->tx_pkt_burst = qede_xmit_pkts;
-	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
-
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		DP_ERR(edev, "Skipping device init from secondary process\n");
 		return 0;
@@ -2489,6 +2483,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
 		QEDE_PMD_DRV_VER_STR_SIZE);
 
+	if (ECORE_IS_CMT(edev)) {
+		eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
+		eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+	} else {
+		eth_dev->rx_pkt_burst = qede_recv_pkts;
+		eth_dev->tx_pkt_burst = qede_xmit_pkts;
+	}
+
+	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
 	/* For CMT mode device do periodic polling for slowpath events.
 	 * This is required since uio device uses only one MSI-x
 	 * interrupt vector but we need one for each engine.
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index c06274d94c..735dfdb667 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -66,8 +66,8 @@
 					(edev)->dev_info.num_tc)
 
 #define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
-#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
+#define QEDE_RSS_COUNT(dev) ((dev)->data->nb_rx_queues)
+#define QEDE_TSS_COUNT(dev) ((dev)->data->nb_tx_queues)
 
 #define QEDE_DUPLEX_FULL	1
 #define QEDE_DUPLEX_HALF	2
@@ -215,6 +215,7 @@ struct qede_dev {
 	struct qed_dev_eth_info dev_info;
 	struct ecore_sb_info *sb_array;
 	struct qede_fastpath *fp_array;
+	struct qede_fastpath_cmt *fp_array_cmt;
 	uint16_t mtu;
 	bool enable_tx_switching;
 	bool rss_enable;
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index 0beade6d59..56ec912720 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -431,7 +431,7 @@ qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
 		DP_ERR(edev, "invalid queue number %u\n",
 		       fdir->action.rx_queue);
 		return -EINVAL;
@@ -1343,7 +1343,6 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,
 			struct rte_flow_error *error,
 			struct rte_flow *flow)
 {
-	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
 	const struct rte_flow_action_queue *queue;
 
 	if (actions == NULL) {
@@ -1358,7 +1357,7 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			queue = actions->conf;
 
-			if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+			if (queue->index >= QEDE_RSS_COUNT(dev)) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ACTION,
 						   actions,
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 64fd9e0639..ca1305f1b4 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -260,13 +260,30 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
 
 	bufsz = rc;
 
-	rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
-				      socket_id, mp, bufsz);
-	if (!rxq)
-		return -ENOMEM;
+	if (ECORE_IS_CMT(edev)) {
+		rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
+					      socket_id, mp, bufsz);
+		if (!rxq)
+			return -ENOMEM;
+
+		qdev->fp_array[qid * 2].rxq = rxq;
+		rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
+					      socket_id, mp, bufsz);
+		if (!rxq)
+			return -ENOMEM;
+
+		qdev->fp_array[qid * 2 + 1].rxq = rxq;
+		/* provide per engine fp struct as rx queue */
+		dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
+	} else {
+		rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
+					      socket_id, mp, bufsz);
+		if (!rxq)
+			return -ENOMEM;
 
-	dev->data->rx_queues[qid] = rxq;
-	qdev->fp_array[qid].rxq = rxq;
+		dev->data->rx_queues[qid] = rxq;
+		qdev->fp_array[qid].rxq = rxq;
+	}
 
 	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
 		qid, nb_desc, rxq->rx_buf_size, socket_id);
@@ -314,6 +331,7 @@ static void _qede_rx_queue_release(struct qede_dev *qdev,
 void qede_rx_queue_release(void *rx_queue)
 {
 	struct qede_rx_queue *rxq = rx_queue;
+	struct qede_fastpath_cmt *fp_cmt;
 	struct qede_dev *qdev;
 	struct ecore_dev *edev;
 
@@ -321,7 +339,13 @@ void qede_rx_queue_release(void *rx_queue)
 		qdev = rxq->qdev;
 		edev = QEDE_INIT_EDEV(qdev);
 		PMD_INIT_FUNC_TRACE(edev);
-		_qede_rx_queue_release(qdev, edev, rxq);
+		if (ECORE_IS_CMT(edev)) {
+			fp_cmt = rx_queue;
+			_qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
+			_qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
+		} else {
+			_qede_rx_queue_release(qdev, edev, rxq);
+		}
 	}
 }
 
@@ -454,13 +478,30 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
 
-	txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
-				      socket_id, tx_conf);
-	if (!txq)
-		return -ENOMEM;
+	if (ECORE_IS_CMT(edev)) {
+		txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
+					      socket_id, tx_conf);
+		if (!txq)
+			return -ENOMEM;
+
+		qdev->fp_array[queue_idx * 2].txq = txq;
+		txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
+					      socket_id, tx_conf);
+		if (!txq)
+			return -ENOMEM;
+
+		qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
+		dev->data->tx_queues[queue_idx] =
+					&qdev->fp_array_cmt[queue_idx];
+	} else {
+		txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
+					      socket_id, tx_conf);
+		if (!txq)
+			return -ENOMEM;
 
-	dev->data->tx_queues[queue_idx] = txq;
-	qdev->fp_array[queue_idx].txq = txq;
+		dev->data->tx_queues[queue_idx] = txq;
+		qdev->fp_array[queue_idx].txq = txq;
+	}
 
 	return 0;
 }
@@ -503,6 +544,7 @@ static void _qede_tx_queue_release(struct qede_dev *qdev,
 void qede_tx_queue_release(void *tx_queue)
 {
 	struct qede_tx_queue *txq = tx_queue;
+	struct qede_fastpath_cmt *fp_cmt;
 	struct qede_dev *qdev;
 	struct ecore_dev *edev;
 
@@ -510,7 +552,14 @@ void qede_tx_queue_release(void *tx_queue)
 		qdev = txq->qdev;
 		edev = QEDE_INIT_EDEV(qdev);
 		PMD_INIT_FUNC_TRACE(edev);
-		_qede_tx_queue_release(qdev, edev, txq);
+
+		if (ECORE_IS_CMT(edev)) {
+			fp_cmt = tx_queue;
+			_qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
+			_qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
+		} else {
+			_qede_tx_queue_release(qdev, edev, txq);
+		}
 	}
 }
 
@@ -548,6 +597,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 	struct qede_fastpath *fp;
 	uint32_t num_sbs;
 	uint16_t sb_idx;
+	int i;
 
 	if (IS_VF(edev))
 		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -571,6 +621,28 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 	memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
 			sizeof(*qdev->fp_array));
 
+	if (ECORE_IS_CMT(edev)) {
+		qdev->fp_array_cmt = rte_calloc("fp_cmt",
+						QEDE_RXTX_MAX(qdev) / 2,
+						sizeof(*qdev->fp_array_cmt),
+						RTE_CACHE_LINE_SIZE);
+
+		if (!qdev->fp_array_cmt) {
+			DP_ERR(edev, "fp array for CMT allocation failed\n");
+			return -ENOMEM;
+		}
+
+		memset((void *)qdev->fp_array_cmt, 0,
+		       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
+
+		/* Establish the mapping of fp_array with fp_array_cmt */
+		for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
+			qdev->fp_array_cmt[i].qdev = qdev;
+			qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
+			qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
+		}
+	}
+
 	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
 		fp = &qdev->fp_array[sb_idx];
 		if (!fp)
@@ -635,6 +707,10 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 	if (qdev->fp_array)
 		rte_free(qdev->fp_array);
 	qdev->fp_array = NULL;
+
+	if (qdev->fp_array_cmt)
+		rte_free(qdev->fp_array_cmt);
+	qdev->fp_array_cmt = NULL;
 }
 
 static inline void
@@ -686,9 +762,9 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	int hwfn_index;
 	int rc;
 
-	if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+	if (rx_queue_id < qdev->num_rx_queues) {
 		fp = &qdev->fp_array[rx_queue_id];
-		rxq = eth_dev->data->rx_queues[rx_queue_id];
+		rxq = fp->rxq;
 		/* Allocate buffers for the Rx ring */
 		for (j = 0; j < rxq->nb_rx_desc; j++) {
 			rc = qede_alloc_rx_buffer(rxq);
@@ -757,9 +833,9 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 	int hwfn_index;
 	int rc;
 
-	if (tx_queue_id < eth_dev->data->nb_tx_queues) {
-		txq = eth_dev->data->tx_queues[tx_queue_id];
+	if (tx_queue_id < qdev->num_tx_queues) {
 		fp = &qdev->fp_array[tx_queue_id];
+		txq = fp->txq;
 		memset(&params, 0, sizeof(params));
 		params.queue_id = tx_queue_id / edev->num_hwfns;
 		params.vport_id = 0;
@@ -900,8 +976,8 @@ static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 	int hwfn_index;
 	int rc;
 
-	if (tx_queue_id < eth_dev->data->nb_tx_queues) {
-		txq = eth_dev->data->tx_queues[tx_queue_id];
+	if (tx_queue_id < qdev->num_tx_queues) {
+		txq = qdev->fp_array[tx_queue_id].txq;
 		/* Drain txq */
 		if (qede_drain_txq(qdev, txq, true))
 			return -1; /* For the lack of retcodes */
@@ -932,13 +1008,13 @@ int qede_start_queues(struct rte_eth_dev *eth_dev)
 	uint8_t id;
 	int rc = -1;
 
-	for_each_rss(id) {
+	for (id = 0; id < qdev->num_rx_queues; id++) {
 		rc = qede_rx_queue_start(eth_dev, id);
 		if (rc != ECORE_SUCCESS)
 			return -1;
 	}
 
-	for_each_tss(id) {
+	for (id = 0; id < qdev->num_tx_queues; id++) {
 		rc = qede_tx_queue_start(eth_dev, id);
 		if (rc != ECORE_SUCCESS)
 			return -1;
@@ -953,13 +1029,11 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)
 	uint8_t id;
 
 	/* Stopping RX/TX queues */
-	for_each_tss(id) {
+	for (id = 0; id < qdev->num_tx_queues; id++)
 		qede_tx_queue_stop(eth_dev, id);
-	}
 
-	for_each_rss(id) {
+	for (id = 0; id < qdev->num_rx_queues; id++)
 		qede_rx_queue_stop(eth_dev, id);
-	}
 }
 
 static inline bool qede_tunn_exist(uint16_t flag)
@@ -1739,6 +1813,23 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return rx_pkt;
 }
 
+uint16_t
+qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+	uint16_t eng0_pkts, eng1_pkts;
+
+	eng0_pkts = nb_pkts / 2;
+
+	eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
+
+	eng1_pkts = nb_pkts - eng0_pkts;
+
+	eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
+				   eng1_pkts);
+
+	return eng0_pkts + eng1_pkts;
+}
 
 /* Populate scatter gather buffer descriptor fields */
 static inline uint16_t
@@ -2261,6 +2352,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_pkt_sent;
 }
 
+uint16_t
+qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+	uint16_t eng0_pkts, eng1_pkts;
+
+	eng0_pkts = nb_pkts / 2;
+
+	eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
+
+	eng1_pkts = nb_pkts - eng0_pkts;
+
+	eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
+				   eng1_pkts);
+
+	return eng0_pkts + eng1_pkts;
+}
+
 uint16_t
 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
 		     __rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 5b249cbb26..4a14356636 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -81,10 +81,8 @@
 				 ETH_RSS_VXLAN			|\
 				 ETH_RSS_GENEVE)
 
-#define for_each_rss(i)		for (i = 0; i < qdev->num_rx_queues; i++)
-#define for_each_tss(i)		for (i = 0; i < qdev->num_tx_queues; i++)
 #define QEDE_RXTX_MAX(qdev) \
-	(RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
+	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
 
 /* Macros for non-tunnel packet types lkup table */
 #define QEDE_PKT_TYPE_UNKNOWN 0x0
@@ -179,6 +177,8 @@ struct qede_agg_info {
  * Structure associated with each RX queue.
  */
 struct qede_rx_queue {
+	/* Always keep qdev as first member */
+	struct qede_dev *qdev;
 	struct rte_mempool *mb_pool;
 	struct ecore_chain rx_bd_ring;
 	struct ecore_chain rx_comp_ring;
@@ -199,7 +199,6 @@ struct qede_rx_queue {
 	uint64_t rx_hw_errors;
 	uint64_t rx_alloc_errors;
 	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
-	struct qede_dev *qdev;
 	void *handle;
 };
 
@@ -217,6 +216,8 @@ union db_prod {
 };
 
 struct qede_tx_queue {
+	/* Always keep qdev as first member */
+	struct qede_dev *qdev;
 	struct ecore_chain tx_pbl;
 	struct qede_tx_entry *sw_tx_ring;
 	uint16_t nb_tx_desc;
@@ -231,7 +232,6 @@ struct qede_tx_queue {
 	uint16_t port_id;
 	uint64_t xmit_pkts;
 	bool is_legacy;
-	struct qede_dev *qdev;
 	void *handle;
 };
 
@@ -241,6 +241,18 @@ struct qede_fastpath {
 	struct qede_tx_queue *txq;
 };
 
+/* This structure holds the information of fast path queues
+ * belonging to individual engines in CMT mode.
+ */
+struct qede_fastpath_cmt {
+	/* Always keep this a first element */
+	struct qede_dev *qdev;
+	/* fastpath info of engine 0 */
+	struct qede_fastpath *fp0;
+	/* fastpath info of engine 1 */
+	struct qede_fastpath *fp1;
+};
+
 /*
  * RX/TX function prototypes
  */
@@ -261,12 +273,16 @@ void qede_tx_queue_release(void *tx_queue);
 
 uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
+uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
+			    uint16_t nb_pkts);
 uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			     uint16_t nb_pkts);
 
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
+uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
+			    uint16_t nb_pkts);
 
 uint16_t qede_rxtx_pkts_dummy(void *p_rxq, struct rte_mbuf **pkts,
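
For reference, the pairing and splitting arithmetic introduced above can be summarized in a small standalone sketch: in CMT (100G) mode one ethdev queue is backed by two hardware queues, one per engine, paired through qede_fastpath_cmt, and the *_cmt burst handlers hand half of each burst to fp0 and the remainder to fp1. The sketch below only mirrors the patch; engine_queue, fp_pair and NUM_ENGINES are made-up names rather than the driver's types, and the "even index -> engine 0, odd -> engine 1" association is an assumption that matches how the pairs are built here.

#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES 2	/* CMT device: two hwfns/engines */

struct engine_queue { int engine; int hw_qid; };	/* stand-in for a qede_fastpath entry */
struct fp_pair { struct engine_queue *fp0, *fp1; };	/* stand-in for qede_fastpath_cmt */

int main(void)
{
	unsigned int nb_rx_queues = 3;	/* what the application configures */
	unsigned int hw_queues = nb_rx_queues * NUM_ENGINES;	/* qdev->num_rx_queues after the patch */
	struct engine_queue hw[6];
	struct fp_pair pairs[3];
	unsigned int i;

	/* Same mapping as the fp_array_cmt setup loop in qede_alloc_fp_resc():
	 * pair i takes hardware queues i*2 and i*2+1.
	 */
	for (i = 0; i < hw_queues; i++) {
		hw[i].engine = i % NUM_ENGINES;	/* assumed engine assignment */
		hw[i].hw_qid = i;
	}
	for (i = 0; i < nb_rx_queues; i++) {
		pairs[i].fp0 = &hw[i * 2];
		pairs[i].fp1 = &hw[i * 2 + 1];
	}

	/* Burst splitting as in qede_recv_pkts_cmt()/qede_xmit_pkts_cmt():
	 * half of the requested burst goes to fp0, the remainder to fp1.
	 */
	unsigned int nb_pkts = 33;
	unsigned int eng0_pkts = nb_pkts / 2;
	unsigned int eng1_pkts = nb_pkts - eng0_pkts;

	printf("ethdev queue 1 -> hw queue %d (engine %d, %u pkts) and hw queue %d (engine %d, %u pkts)\n",
	       pairs[1].fp0->hw_qid, pairs[1].fp0->engine, eng0_pkts,
	       pairs[1].fp1->hw_qid, pairs[1].fp1->engine, eng1_pkts);
	return 0;
}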
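From the application side, the visible effect is that a 100G (CMT) adapter no longer requires an even, >= 2 queue count: the PMD doubles the counts internally and reports a halved max_rx_queues. A minimal, hedged usage sketch with standard ethdev calls follows; the port id, descriptor count and mempool are placeholder values and error handling is trimmed.

#include <rte_ethdev.h>

/* Sketch: configure an odd number of queues on a port. With the change
 * above this is accepted on CMT devices as well, since each ethdev queue
 * is backed by one hardware queue per engine inside the PMD.
 */
static int setup_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	struct rte_eth_dev_info info;
	uint16_t nb_queues = 3;	/* odd count, previously rejected on 100G */
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	/* On CMT devices max_rx_queues is already reported halved,
	 * so the usual clamp needs no special casing.
	 */
	if (nb_queues > info.max_rx_queues)
		nb_queues = info.max_rx_queues;

	ret = rte_eth_dev_configure(port_id, nb_queues, nb_queues, &conf);
	if (ret != 0)
		return ret;

	for (uint16_t q = 0; q < nb_queues; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, 512,
					     rte_eth_dev_socket_id(port_id),
					     NULL, mb_pool);
		if (ret < 0)
			return ret;
		ret = rte_eth_tx_queue_setup(port_id, q, 512,
					     rte_eth_dev_socket_id(port_id),
					     NULL);
		if (ret < 0)
			return ret;
	}

	return rte_eth_dev_start(port_id);
}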