5 changes: 5 additions & 0 deletions doc/guides/nics/idpf.rst
@@ -79,6 +79,11 @@ Runtime Configuration
Then the PMD will configure the Tx queues in single queue mode.
Otherwise, split queue mode is chosen by default.

.. note::

   In split queue mode, sharing a completion queue among multiple TX queues that are
   serviced by different CPU cores is not supported.
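
A minimal sketch of an application-side setup that respects this constraint: give
each lcore its own Tx queue, so every completion queue is serviced by exactly one
core. The port id, queue counts and descriptor number below are placeholders, not
part of the PMD or of this patch.

.. code-block:: c

   #include <rte_ethdev.h>
   #include <rte_lcore.h>

   /* Sketch only: one Tx queue per lcore, so no completion queue is ever
    * serviced from two different cores. Rx queue count mirrors Tx for brevity.
    */
   static int
   setup_tx_queues(uint16_t port_id)
   {
       uint16_t nb_txq = rte_lcore_count();
       struct rte_eth_conf conf = {0};
       int ret;

       ret = rte_eth_dev_configure(port_id, nb_txq, nb_txq, &conf);
       if (ret != 0)
           return ret;

       for (uint16_t q = 0; q < nb_txq; q++) {
           ret = rte_eth_tx_queue_setup(port_id, q, 512,
                   rte_eth_dev_socket_id(port_id), NULL);
           if (ret != 0)
               return ret;
       }

       /* In the datapath, lcore i must then transmit only on queue i. */
       return 0;
   }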


Driver compilation and testing
------------------------------
1 change: 1 addition & 0 deletions drivers/net/intel/idpf/idpf_common_device.h
@@ -70,6 +70,7 @@ enum idpf_rx_func_type {
	IDPF_RX_SINGLEQ,
	IDPF_RX_SINGLEQ_SCATTERED,
	IDPF_RX_SINGLEQ_AVX2,
	IDPF_RX_AVX2,
	IDPF_RX_AVX512,
	IDPF_RX_SINGLEQ_AVX512,
	IDPF_RX_MAX
59 changes: 59 additions & 0 deletions drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -250,6 +250,58 @@ idpf_qc_split_tx_complq_reset(struct ci_tx_queue *cq)
	cq->expected_gen_id = 1;
}

RTE_EXPORT_INTERNAL_SYMBOL(idpf_splitq_rearm_common)
void
idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
{
	struct rte_mbuf **rxp = &rx_bufq->sw_ring[rx_bufq->rxrearm_start];
	volatile union virtchnl2_rx_buf_desc *rxdp = rx_bufq->rx_ring;
	uint16_t rx_id;
	int i;

	rxdp += rx_bufq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mbuf_raw_alloc_bulk(rx_bufq->mp,
			(void *)rxp, IDPF_RXQ_REARM_THRESH) < 0) {
		if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
		    rx_bufq->nb_rx_desc) {
			for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
				rxp[i] = &rx_bufq->fake_mbuf;
				rxdp[i] = (union virtchnl2_rx_buf_desc){0};
			}
		}
		rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
				IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
		return;
	}

	/* Initialize the mbufs in vector, process 8 mbufs in one loop */
	for (i = 0; i < IDPF_RXQ_REARM_THRESH;
			i += 8, rxp += 8, rxdp += 8) {
		rxdp[0].split_rd.pkt_addr = rxp[0]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[1].split_rd.pkt_addr = rxp[1]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[2].split_rd.pkt_addr = rxp[2]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[3].split_rd.pkt_addr = rxp[3]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[4].split_rd.pkt_addr = rxp[4]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[5].split_rd.pkt_addr = rxp[5]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[6].split_rd.pkt_addr = rxp[6]->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[7].split_rd.pkt_addr = rxp[7]->buf_iova + RTE_PKTMBUF_HEADROOM;
	}

	rx_bufq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
	if (rx_bufq->rxrearm_start >= rx_bufq->nb_rx_desc)
		rx_bufq->rxrearm_start = 0;

	rx_bufq->rxrearm_nb -= IDPF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rx_bufq->rxrearm_start == 0) ?
			(rx_bufq->nb_rx_desc - 1) : (rx_bufq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, rx_id);
}

RTE_EXPORT_INTERNAL_SYMBOL(idpf_qc_single_tx_queue_reset)
void
idpf_qc_single_tx_queue_reset(struct ci_tx_queue *txq)
@@ -1684,6 +1736,13 @@ const struct ci_rx_path_info idpf_rx_path_infos[] = {
			.rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
			.simd_width = RTE_VECT_SIMD_256,
			.extra.single_queue = true}},
	[IDPF_RX_AVX2] = {
		.pkt_burst = idpf_dp_splitq_recv_pkts_avx2,
		.info = "Split AVX2 Vector",
		.features = {
			.rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
			.simd_width = RTE_VECT_SIMD_256,
		}},
#ifdef CC_AVX512_SUPPORT
	[IDPF_RX_AVX512] = {
		.pkt_burst = idpf_dp_splitq_recv_pkts_avx512,
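The rearm helper exported in idpf_common_rxtx.c above is declared as an internal
symbol so it can be called from the split-queue vector Rx paths (the AVX512 path
and the AVX2 path registered here). A rough caller sketch of the usual pattern;
the wrapper name below is hypothetical and not part of this patch:

/*
 * Illustrative sketch only (the wrapper name is hypothetical): a split-queue
 * vector Rx burst typically rearms a buffer queue once the number of consumed
 * descriptors crosses the rearm threshold.
 */
static inline void
idpf_splitq_rearm_if_needed(struct idpf_rx_queue *rx_bufq)
{
	if (rx_bufq->rxrearm_nb > IDPF_RXQ_REARM_THRESH)
		idpf_splitq_rearm_common(rx_bufq);
}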
8 changes: 8 additions & 0 deletions drivers/net/intel/idpf/idpf_common_rxtx.h
@@ -203,6 +203,8 @@ void idpf_qc_split_tx_descq_reset(struct ci_tx_queue *txq);
__rte_internal
void idpf_qc_split_tx_complq_reset(struct ci_tx_queue *cq);
__rte_internal
void idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq);
__rte_internal
void idpf_qc_single_tx_queue_reset(struct ci_tx_queue *txq);
__rte_internal
void idpf_qc_rx_queue_release(void *rxq);
@@ -252,13 +254,19 @@ __rte_internal
uint16_t idpf_dp_splitq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_splitq_recv_pkts_avx2(void *rxq, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_recv_pkts_avx2(void *rx_queue,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_splitq_xmit_pkts_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_xmit_pkts_avx2(void *tx_queue,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);