
octeontx2-pf: Add XDP support to netdev PF
Adds XDP_PASS, XDP_TX, XDP_DROP and XDP_REDIRECT support
for netdev PF.

Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Geetha sowjanya authored and davem330 committed Sep 30, 2021
1 parent 85212a1 commit 06059a1
Showing 6 changed files with 322 additions and 34 deletions.
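
For readers less familiar with XDP: the four verdicts named in the commit description are returned by a BPF program attached through the new .ndo_bpf hook added below. A minimal illustrative program (not part of this commit; assumes a standard clang -target bpf build against libbpf's bpf_helpers.h) might look like this:

/* xdp_example.c -- illustrative only, not from this commit */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_example(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to hold an Ethernet header and pass
	 * everything else up the stack. XDP_TX (bounce the frame back
	 * out of the same port) and XDP_REDIRECT (forward it to another
	 * device) are the other verdicts this patch teaches the PF
	 * driver to handle.
	 */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
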
35 changes: 22 additions & 13 deletions drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -718,7 +718,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;

ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -835,17 +835,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;

err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
if (err)
return err;
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
if (err)
return err;
}

sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;

if (pfvf->ptp) {
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -871,20 +873,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
int err, pool_id;

cq = &qset->cq[qidx];
cq->cq_idx = qidx;
non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
} else {
if (pfvf->xdp_prog)
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
} else {
cq->cq_type = CQ_XDP;
cq->cint_idx = qidx - non_xdp_queues;
cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;

@@ -991,7 +1000,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}

/* Initialize TX queues */
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1038,7 +1047,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)

/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
nixlf->sq_cnt = pfvf->hw.tx_queues;
nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1076,7 +1085,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;

for (qidx = 0; qidx < hw->tx_queues; qidx++) {
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1288,7 +1297,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

for (qidx = 0; qidx < hw->tx_queues; qidx++) {
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1308,7 +1317,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;

/* Allocate pointers and free them to aura/pool */
for (qidx = 0; qidx < hw->tx_queues; qidx++) {
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];

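
The net effect of the otx2_cq_init() and queue-count changes above is a fixed CQ index layout: RX CQs first, then the regular TX CQs, then one XDP TX CQ per RX queue. A hypothetical helper (illustrative only; example_cq_type() is not a driver function, it simply mirrors the mapping added in otx2_cq_init()) makes the layout explicit:

/* Illustrative sketch -- mirrors the qidx -> CQ type mapping above */
static inline int example_cq_type(struct otx2_hw *hw, u16 qidx)
{
	u16 non_xdp_queues = hw->rx_queues + hw->tx_queues;

	if (qidx < hw->rx_queues)
		return CQ_RX;		/* receive CQs */
	if (qidx < non_xdp_queues)
		return CQ_TX;		/* regular send CQs */
	return CQ_XDP;			/* XDP_TX/XDP_REDIRECT send CQs */
}

With, say, 8 RX and 8 TX queues and an XDP program loaded, hw.xdp_queues becomes 8, hw.tot_tx_queues becomes 16, and otx2_open() sizes the CQ array at 24 entries in exactly this order.
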
4 changes: 4 additions & 0 deletions drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -171,6 +171,8 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
u16 xdp_queues;
u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@@ -345,6 +347,7 @@ struct otx2_nic {
u64 flags;
u64 *cq_op_addr;

struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -857,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
135 changes: 130 additions & 5 deletions drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);

static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;

if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
netdev->mtu);
return -EINVAL;
}
if (if_up)
otx2_stop(netdev);

@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}

/* SQ */
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
@@ -1332,7 +1340,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
hw->sqpool_cnt = hw->tx_queues;
hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
@@ -1541,7 +1549,7 @@ int otx2_open(struct net_device *netdev)

netif_carrier_off(netdev);

pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1561,7 +1569,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;

qset->sq = kcalloc(pf->hw.tx_queues,
qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1582,11 +1590,20 @@
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
* 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
if (pf->xdp_prog)
cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
(qidx + pf->hw.rx_queues +
pf->hw.tx_queues) :
CINT_INVALID_CQ;
else
cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;

cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -2291,6 +2308,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
return 0;
}

static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
struct page *page;
u64 dma_addr;
int err = 0;

dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
offset_in_page(xdpf->data), xdpf->len,
DMA_TO_DEVICE);
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;

err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
page = virt_to_page(xdpf->data);
put_page(page);
return -ENOMEM;
}
return 0;
}

static int otx2_xdp_xmit(struct net_device *netdev, int n,
struct xdp_frame **frames, u32 flags)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = smp_processor_id();
struct otx2_snd_queue *sq;
int drops = 0, i;

if (!netif_running(netdev))
return -ENETDOWN;

qidx += pf->hw.tx_queues;
sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

/* Abort xmit if the XDP queue is not initialized */
if (unlikely(!sq))
return -ENXIO;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;

for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;

err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
if (err)
drops++;
}
return n - drops;
}

static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
struct net_device *dev = pf->netdev;
bool if_up = netif_running(pf->netdev);
struct bpf_prog *old_prog;

if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
return -EOPNOTSUPP;
}

if (if_up)
otx2_stop(pf->netdev);

old_prog = xchg(&pf->xdp_prog, prog);

if (old_prog)
bpf_prog_put(old_prog);

if (pf->xdp_prog)
bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

/* Network stack and XDP share the same rx queues.
* Use separate tx queues for XDP and network stack.
*/
if (pf->xdp_prog)
pf->hw.xdp_queues = pf->hw.rx_queues;
else
pf->hw.xdp_queues = 0;

pf->hw.tot_tx_queues += pf->hw.xdp_queues;

if (if_up)
otx2_open(pf->netdev);

return 0;
}

static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct otx2_nic *pf = netdev_priv(netdev);

switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
default:
return -EINVAL;
}
}

static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
int req_perm)
{
@@ -2358,6 +2480,8 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
@@ -2499,6 +2623,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;

num_vec = pci_msix_vec_count(pdev);
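
As a usage sketch (not part of the commit): the new .ndo_bpf handler is reached through the ordinary XDP attach path, so a program can be loaded from user space with libbpf. The loader below is illustrative only and assumes libbpf v0.8+ for bpf_xdp_attach(); XDP_FLAGS_DRV_MODE requests native (driver-level) XDP, which is what this patch implements for the octeontx2 PF:

/* attach_xdp_example() -- hypothetical user-space loader, illustrative only */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_xdp_example(const char *obj_path, const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file(obj_path, NULL);
	if (libbpf_get_error(obj))
		return -1;

	if (bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);	/* first program in the object */
	if (!prog)
		return -1;

	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}

The same path can also be exercised with iproute2's "ip link set dev <ifname> xdp obj <file>" once the driver exposes native XDP support.
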