net/dpaa2: enable error queues optionally
When error packets are received by the Ethernet interface, this patch
enables receiving them on the error queue, printing the error and
dumping the error packet.

To enable, use the dev_arg: fslmc:dpni.1,drv_error_queue=1

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
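
For context, here is a minimal sketch (not part of the patch) of how an application could attach a DPNI with this devarg at runtime; the device name ``dpni.1`` and the use of ``rte_dev_probe()`` are illustrative assumptions:

```c
#include <rte_dev.h>
#include <rte_log.h>

/* Hypothetical helper: hot-plug a DPAA2 DPNI with the error queue enabled.
 * "dpni.1" is a placeholder; use the DPNI object present on your board.
 */
static int
attach_dpni_with_err_queue(void)
{
	int ret;

	/* Same devarg string as in the documentation example below. */
	ret = rte_dev_probe("fslmc:dpni.1,drv_error_queue=1");
	if (ret < 0)
		RTE_LOG(ERR, EAL, "probe of dpni.1 with error queue failed\n");

	return ret;
}
```

Alternatively, the same string is typically supplied on the EAL command line via the allow list (e.g. ``-a fslmc:dpni.1,drv_error_queue=1``).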
nipungupta87 authored and Ferruh Yigit committed Feb 24, 2021
1 parent 8d21c56 commit 4690a61
Showing 5 changed files with 168 additions and 8 deletions.
6 changes: 6 additions & 0 deletions doc/guides/nics/dpaa2.rst
@@ -488,6 +488,12 @@ for details.
In this mode tx conf queues need to be polled to free the buffers.
e.g. ``fslmc:dpni.1,drv_tx_conf=1``

* Use dev arg option ``drv_error_queue=1`` to enable the packets-in-error queue.
DPAA2 hardware drops error packets by default. With this option, the hardware
delivers the error packets to the driver instead of dropping them, and the
driver dumps them so that the user can check what is wrong with those packets.
e.g. ``fslmc:dpni.1,drv_error_queue=1``

Enabling logs
-------------

1 change: 1 addition & 0 deletions drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -314,6 +314,7 @@ enum qbman_fd_format {
#define DPAA2_GET_FD_FLC(fd) \
(((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.ctrl & 0x000000FF)
#define DPAA2_GET_FD_FA_ERR(fd) ((fd)->simple.ctrl & 0x00000040)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
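
For orientation, a hedged sketch of how the existing ``DPAA2_GET_FD_ERR()`` and the new ``DPAA2_GET_FD_FA_ERR()`` macro might be used together when inspecting a dequeued frame descriptor; the include paths assume an in-tree driver build and the function itself is not part of the patch:

```c
#include <rte_branch_prediction.h>	/* likely()/unlikely() */
#include "dpaa2_hw_pvt.h"		/* struct qbman_fd and the FD macros (in-tree path assumed) */

/* Sketch: 0 = clean frame, 1 = FD error without the frame-annotation bit,
 * 2 = frame-annotation error (details live in struct dpaa2_fas in the
 * hardware annotation area in front of the frame data).
 */
static int
classify_fd(const struct qbman_fd *fd)
{
	if (likely(DPAA2_GET_FD_ERR(fd) == 0))
		return 0;	/* no error bits in the FD control word */
	if (DPAA2_GET_FD_FA_ERR(fd))
		return 2;	/* bit 0x40: frame annotation reports the error */
	return 1;		/* some other FD-level error */
}
```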
67 changes: 62 additions & 5 deletions drivers/net/dpaa2/dpaa2_ethdev.c
@@ -32,6 +32,7 @@
#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE "drv_err_queue"
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */

@@ -71,6 +72,9 @@ bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -391,6 +395,25 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
goto fail;
}

if (dpaa2_enable_err_queue) {
priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
sizeof(struct dpaa2_queue), 0);

dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
dpaa2_q->q_storage = rte_malloc("err_dq_storage",
sizeof(struct queue_storage_info_t) *
RTE_MAX_LCORE,
RTE_CACHE_LINE_SIZE);
if (!dpaa2_q->q_storage)
goto fail;

memset(dpaa2_q->q_storage, 0,
sizeof(struct queue_storage_info_t));
for (i = 0; i < RTE_MAX_LCORE; i++)
if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
goto fail;
}

for (i = 0; i < priv->nb_tx_queues; i++) {
mc_q->eth_data = dev->data;
mc_q->flow_id = 0xffff;
@@ -458,6 +481,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
rte_free(dpaa2_q->q_storage);
priv->rx_vq[i--] = NULL;
}

if (dpaa2_enable_err_queue) {
dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
if (dpaa2_q->q_storage)
dpaa2_free_dq_storage(dpaa2_q->q_storage);
rte_free(dpaa2_q->q_storage);
}

rte_free(mc_q);
return -1;
}
@@ -1163,11 +1194,31 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
dpaa2_q->fqid = qid.fqid;
}

/*checksum errors, send them to normal path and set it in annotation */
err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
err_cfg.errors |= DPNI_ERROR_PHE;
if (dpaa2_enable_err_queue) {
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
ret);
return ret;
}
dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
dpaa2_q->fqid = qid.fqid;
dpaa2_q->eth_data = dev->data;

err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
err_cfg.errors = DPNI_ERROR_DISC;
err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
} else {
/* checksum errors, send them to normal path
* and set it in annotation
*/
err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

/* if packet with parse error are not to be dropped */
err_cfg.errors |= DPNI_ERROR_PHE;

err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
}
err_cfg.set_frame_annotation = true;

ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
@@ -2624,6 +2675,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
DPAA2_PMD_INFO("TX_CONF Enabled");
}

if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
dpaa2_enable_err_queue = 1;
DPAA2_PMD_INFO("Enable error queue");
}

/* Allocate memory for hardware structure for queues */
ret = dpaa2_alloc_rx_tx_queues(eth_dev);
if (ret) {
@@ -2863,5 +2919,6 @@ RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
DRIVER_LOOPBACK_MODE "=<int> "
DRIVER_NO_PREFETCH_MODE "=<int>"
DRIVER_TX_CONF "=<int>");
DRIVER_TX_CONF "=<int>"
DRIVER_ERROR_QUEUE "=<int>");
RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
5 changes: 4 additions & 1 deletion drivers/net/dpaa2/dpaa2_ethdev.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright 2016-2020 NXP
* Copyright 2016-2021 NXP
*
*/

@@ -117,6 +117,8 @@ extern enum rte_filter_type dpaa2_filter_type;

extern const struct rte_tm_ops dpaa2_tm_ops;

extern bool dpaa2_enable_err_queue;

#define IP_ADDRESS_OFFSET_INVALID (-1)

struct dpaa2_key_info {
@@ -154,6 +156,7 @@ struct dpaa2_dev_priv {
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
void *tx_conf_vq[MAX_TX_QUEUES];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
97 changes: 95 additions & 2 deletions drivers/net/dpaa2/dpaa2_rxtx.c
@@ -14,6 +14,7 @@
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_hexdump.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
@@ -550,6 +551,93 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
return 0;
}

static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
/* Function receive frames for a given device and VQ */
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0, num_pulled;
uint8_t pending, status;
struct qbman_swp *swp;
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
uint32_t lcore_id = rte_lcore_id();
void *v_addr, *hw_annot_addr;
struct dpaa2_fas *fas;

if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
rte_gettid());
return;
}
}
swp = DPAA2_PER_LCORE_PORTAL;

dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG("VDQ command is not issued.QBMAN is busy\n");
/* Portal was busy, try again */
continue;
}
break;
}

/* Check if the previous issued command is completed. */
while (!qbman_check_command_complete(dq_storage))
;

num_pulled = 0;
pending = 1;
do {
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
while (!qbman_check_new_result(dq_storage))
;

/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
if (qbman_result_DQ_is_pull_complete(dq_storage)) {
pending = 0;
/* Check for valid frame. */
status = qbman_result_DQ_flags(dq_storage);
if (unlikely((status &
QBMAN_DQ_STAT_VALIDFRAME) == 0))
continue;
}
fd = qbman_result_DQ_fd(dq_storage);
v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
fas = hw_annot_addr;

DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
" fd_off: %d, fd_err: %x, fas_status: %x",
rte_lcore_id(), eth_data->port_id,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
fas->status);
rte_hexdump(stderr, "Error packet", v_addr,
DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

dq_storage++;
num_rx++;
num_pulled++;
} while (pending);

dpaa2_q->err_pkts += num_rx;
}

/* This function assumes that caller will be keep the same value for nb_pkts
* across calls per queue, if that is not the case, better use non-prefetch
* version of rx call.
@@ -570,9 +658,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct qbman_pull_desc pulldesc;
struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
#if defined(RTE_LIBRTE_IEEE1588)
struct dpaa2_dev_priv *priv = eth_data->dev_private;
#endif

if (unlikely(dpaa2_enable_err_queue))
dump_err_pkts(priv->rx_err_vq);

if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
ret = dpaa2_affine_qbman_ethrx_swp();
@@ -807,6 +896,10 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;

if (unlikely(dpaa2_enable_err_queue))
dump_err_pkts(priv->rx_err_vq);

if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
