Skip to content

Commit

Permalink
net/avf: enable SSE Rx Tx
Browse files Browse the repository at this point in the history
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
  • Loading branch information
wujingji authored and Ferruh Yigit committed Jan 16, 2018
1 parent cbdbd36 commit 319c421
Show file tree
Hide file tree
Showing 9 changed files with 1,118 additions and 11 deletions.
1 change: 1 addition & 0 deletions config/common_base
Expand Up @@ -221,6 +221,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
#
CONFIG_RTE_LIBRTE_AVF_PMD=y
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
Expand Down
36 changes: 36 additions & 0 deletions doc/guides/nics/features/avf_vec.ini
@@ -0,0 +1,36 @@
;
; Supported features of the 'avf_vec' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Speed capabilities = Y
Link status = Y
Link status event = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
CRC offload = Y
VLAN offload = P
L3 checksum offload = P
L4 checksum offload = P
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
Linux VFIO = Y
x86-32 = Y
x86-64 = Y
3 changes: 3 additions & 0 deletions drivers/net/avf/Makefile
Expand Up @@ -47,5 +47,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_rxtx.c
ifeq ($(CONFIG_RTE_ARCH_x86), y)
SRCS-$(CONFIG_RTE_LIBRTE_AVF_INC_VECTOR) += avf_rxtx_vec_sse.c
endif

include $(RTE_SDK)/mk/rte.lib.mk
4 changes: 4 additions & 0 deletions drivers/net/avf/avf.h
Expand Up @@ -116,6 +116,10 @@ struct avf_adapter {
struct avf_hw hw;
struct rte_eth_dev *eth_dev;
struct avf_info vf;

/* For vector PMD */
bool rx_vec_allowed;
bool tx_vec_allowed;
};

/* AVF_DEV_PRIVATE_TO */
Expand Down
11 changes: 11 additions & 0 deletions drivers/net/avf/avf_ethdev.c
Expand Up @@ -121,6 +121,17 @@ avf_dev_configure(struct rte_eth_dev *dev)
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

#ifdef RTE_LIBRTE_AVF_INC_VECTOR
/* Initialize to TRUE. If any of Rx queues doesn't meet the
* vector Rx/Tx preconditions, it will be reset.
*/
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
#else
ad->rx_vec_allowed = false;
ad->tx_vec_allowed = false;
#endif

/* Vlan stripping setting */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
Expand Down
172 changes: 162 additions & 10 deletions drivers/net/avf/avf_rxtx.c
Expand Up @@ -92,6 +92,34 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
return 0;
}

#ifdef RTE_LIBRTE_AVF_INC_VECTOR
/* Check whether an Rx queue meets the vector-PMD preconditions:
 * the free threshold must be at least one vector burst and the ring
 * size must be an exact multiple of it.
 */
static inline bool
check_rx_vec_allow(struct avf_rx_queue *rxq)
{
	if (rxq->rx_free_thresh < AVF_VPMD_RX_MAX_BURST ||
	    rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
		PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
		return FALSE;
	}

	PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
	return TRUE;
}

/* Check whether a Tx queue meets the vector-PMD preconditions:
 * only the simple txq flags may be set, and rs_thresh must fall
 * inside the [AVF_VPMD_TX_MAX_BURST, AVF_VPMD_TX_MAX_FREE_BUF] window.
 */
static inline bool
check_tx_vec_allow(struct avf_tx_queue *txq)
{
	if ((txq->txq_flags & AVF_SIMPLE_FLAGS) != AVF_SIMPLE_FLAGS ||
	    txq->rs_thresh < AVF_VPMD_TX_MAX_BURST ||
	    txq->rs_thresh > AVF_VPMD_TX_MAX_FREE_BUF) {
		PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
		return FALSE;
	}

	PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
	return TRUE;
}
#endif

static inline void
reset_rx_queue(struct avf_rx_queue *rxq)
{
Expand Down Expand Up @@ -225,6 +253,14 @@ release_txq_mbufs(struct avf_tx_queue *txq)
}
}

/* Default queue ops used by the scalar data path; queues point at these
 * via rxq->ops / txq->ops so stop/release paths can free mbufs without
 * knowing which Rx/Tx implementation is active.
 * NOTE(review): presumably the vector path swaps in its own ops from
 * avf_rxq_vec_setup()/avf_txq_vec_setup() — confirm in avf_rxtx_vec_sse.c.
 */
static const struct avf_rxq_ops def_rxq_ops = {
	.release_mbufs = release_rxq_mbufs,
};

static const struct avf_txq_ops def_txq_ops = {
	.release_mbufs = release_txq_mbufs,
};

int
avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
Expand Down Expand Up @@ -325,7 +361,12 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = TRUE;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
rxq->ops = &def_rxq_ops;

#ifdef RTE_LIBRTE_AVF_INC_VECTOR
if (check_rx_vec_allow(rxq) == FALSE)
ad->rx_vec_allowed = false;
#endif
return 0;
}

Expand All @@ -337,6 +378,8 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct avf_adapter *ad =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_tx_queue *txq;
const struct rte_memzone *mz;
uint32_t ring_size;
Expand Down Expand Up @@ -416,6 +459,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
txq->ops = &def_txq_ops;

#ifdef RTE_LIBRTE_AVF_INC_VECTOR
if (check_tx_vec_allow(txq) == FALSE)
ad->tx_vec_allowed = false;
#endif

return 0;
}
Expand Down Expand Up @@ -514,7 +563,7 @@ avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}

rxq = dev->data->rx_queues[rx_queue_id];
release_rxq_mbufs(rxq);
rxq->ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

Expand Down Expand Up @@ -542,7 +591,7 @@ avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}

txq = dev->data->tx_queues[tx_queue_id];
release_txq_mbufs(txq);
txq->ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

Expand All @@ -557,7 +606,7 @@ avf_dev_rx_queue_release(void *rxq)
if (!q)
return;

release_rxq_mbufs(q);
q->ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
Expand All @@ -571,7 +620,7 @@ avf_dev_tx_queue_release(void *txq)
if (!q)
return;

release_txq_mbufs(q);
q->ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
Expand All @@ -595,15 +644,15 @@ avf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
release_txq_mbufs(txq);
txq->ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
release_rxq_mbufs(rxq);
rxq->ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
Expand Down Expand Up @@ -1320,6 +1369,27 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}

/* Vector Tx burst entry point: feed the fixed-burst vector routine in
 * chunks of at most rs_thresh packets, stopping early if a chunk is
 * only partially transmitted.
 *
 * Returns the number of packets actually handed to the hardware ring.
 */
static uint16_t
avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
	uint16_t sent = 0;

	while (nb_pkts > 0) {
		uint16_t burst = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		uint16_t done = avf_xmit_fixed_burst_vec(tx_queue,
							 &tx_pkts[sent],
							 burst);

		sent += done;
		nb_pkts -= done;
		/* Partial burst means the ring is full — stop here. */
		if (done < burst)
			break;
	}

	return sent;
}

/* TX prep functions */
uint16_t
avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
Expand Down Expand Up @@ -1372,18 +1442,64 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
void
avf_set_rx_function(struct rte_eth_dev *dev)
{
if (dev->data->scattered_rx)
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_rx_queue *rxq;
int i;

if (adapter->rx_vec_allowed) {
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
" (port=%d).", dev->data->port_id);
dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
} else {
PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
" (port=%d).", dev->data->port_id);
dev->rx_pkt_burst = avf_recv_pkts_vec;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
avf_rxq_vec_setup(rxq);
}
} else if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = avf_recv_scattered_pkts;
else
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = avf_recv_pkts;
}
}

/* Select the Tx burst/prepare callbacks for the port.
 *
 * If every configured Tx queue met the vector preconditions
 * (adapter->tx_vec_allowed, set during queue setup), install the SSE
 * vector Tx handler — which uses no tx_pkt_prepare step — and run the
 * per-queue vector setup; otherwise fall back to the scalar Tx handler
 * paired with avf_prep_pkts.
 *
 * Fix: the rendered diff left two stale removed lines (the old
 * unconditional tx_pkt_burst/tx_pkt_prepare assignments) before the
 * local declarations, which made this function syntactically invalid;
 * they are dropped here.
 */
void
avf_set_tx_function(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_tx_queue *txq;
	int i;

	if (adapter->tx_vec_allowed) {
		PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
			    dev->data->port_id);
		dev->tx_pkt_burst = avf_xmit_pkts_vec;
		dev->tx_pkt_prepare = NULL;
		/* Per-queue vector setup (no-op weak stub when the
		 * vector PMD is not compiled in).
		 */
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			if (!txq)
				continue;
			avf_txq_vec_setup(txq);
		}
	} else {
		PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
			    dev->data->port_id);
		dev->tx_pkt_burst = avf_xmit_pkts;
		dev->tx_pkt_prepare = avf_prep_pkts;
	}
}

void
Expand Down Expand Up @@ -1505,3 +1621,39 @@ avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)

return RTE_ETH_TX_DESC_FULL;
}

/* Weak fallback for the vector Rx burst: overridden by the real SSE
 * implementation (avf_rxtx_vec_sse.c) when
 * CONFIG_RTE_LIBRTE_AVF_INC_VECTOR is enabled; otherwise receives
 * nothing (returns 0 packets).
 */
uint16_t __attribute__((weak))
avf_recv_pkts_vec(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **rx_pkts,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/* Weak fallback for the vector scattered Rx burst: overridden by the
 * SSE implementation when the vector PMD is compiled in; otherwise
 * receives nothing (returns 0 packets).
 */
uint16_t __attribute__((weak))
avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
			    __rte_unused struct rte_mbuf **rx_pkts,
			    __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/* Weak fallback for the fixed-size vector Tx burst: overridden by the
 * SSE implementation when the vector PMD is compiled in; otherwise
 * transmits nothing (returns 0 packets).
 */
uint16_t __attribute__((weak))
avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
			 __rte_unused struct rte_mbuf **tx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/* Weak fallback for per-queue vector Rx setup: overridden when the
 * vector PMD is compiled in; the stub reports failure (-1).
 */
int __attribute__((weak))
avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
{
	return -1;
}

/* Weak fallback for per-queue vector Tx setup: overridden when the
 * vector PMD is compiled in; the stub reports failure (-1).
 */
int __attribute__((weak))
avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
{
	return -1;
}

0 comments on commit 319c421

Please sign in to comment.