commit ca0e014 (parent 2f0bc54)
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Committer: David S. Miller <davem@davemloft.net>

net: mvneta: move skb build after descriptors processing

Move the skb build after all descriptors have been processed. This is a
preliminary patch to enable multi-buffer and jumbo-frame support for XDP.
Introduce the mvneta_xdp_put_buff() routine to release all pages used by
an XDP multi-buffer.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
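In outline, the patch replaces the old "build an skb on the first descriptor,
then append fragments to it" scheme with a deferred one: all buffers of a
frame are first accumulated in a single xdp_buff (fragments go into its
skb_shared_info), and the skb is built only once the last descriptor has been
processed. A condensed sketch of the reworked NAPI loop (error handling and
statistics elided; all function and field names are taken from the diff
below):

	while (rx_proc < budget && rx_proc < rx_todo) {
		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* head buffer: init xdp_buf, zero nr_frags, run XDP */
			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
						   xdp_prog, page, &ps);
			desc_status = rx_desc->status;
		} else {
			/* middle/last buffers: stash the page as a frag */
			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
						    page);
		}

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			continue;	/* frame not complete yet */

		/* whole frame received: build the skb in one place */
		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
		if (IS_ERR(skb))
			/* on failure, release head page and all frags */
			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
	}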

File tree: 1 file changed (+101 / -57 lines)

drivers/net/ethernet/marvell/mvneta.c

@@ -2026,6 +2026,20 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 	return i;
 }
 
+static void
+mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+		    struct xdp_buff *xdp, int sync_len, bool napi)
+{
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+	int i;
+
+	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+			   sync_len, napi);
+	for (i = 0; i < sinfo->nr_frags; i++)
+		page_pool_put_full_page(rxq->page_pool,
+					skb_frag_page(&sinfo->frags[i]), napi);
+}
+
 static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 			struct xdp_frame *xdpf, bool dma_map)
@@ -2229,6 +2243,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	int data_len = -MVNETA_MH_SIZE, len;
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
+	struct skb_shared_info *sinfo;
 	int ret = 0;
 
 	if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
@@ -2252,35 +2267,13 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	xdp->data_end = xdp->data + data_len;
 	xdp_set_data_meta_invalid(xdp);
 
-	if (xdp_prog) {
-		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
-		if (ret)
-			goto out;
-	}
-
-	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
-	if (unlikely(!rxq->skb)) {
-		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+	sinfo = xdp_get_shared_info_from_buff(xdp);
+	sinfo->nr_frags = 0;
 
-		netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
-
-		u64_stats_update_begin(&stats->syncp);
-		stats->es.skb_alloc_error++;
-		stats->rx_dropped++;
-		u64_stats_update_end(&stats->syncp);
-
-		return -ENOMEM;
-	}
-	page_pool_release_page(rxq->page_pool, page);
-
-	skb_reserve(rxq->skb,
-		    xdp->data - xdp->data_hard_start);
-	skb_put(rxq->skb, xdp->data_end - xdp->data);
-	mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
+	if (xdp_prog)
+		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
 
 	rxq->left_size = rx_desc->data_size - len;
-
-out:
 	rx_desc->buf_phys_addr = 0;
 
 	return ret;
@@ -2290,8 +2283,10 @@ static void
 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
+			    struct xdp_buff *xdp,
 			    struct page *page)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	int data_len, len;
@@ -2307,41 +2302,79 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 	dma_sync_single_for_cpu(dev->dev.parent,
 				rx_desc->buf_phys_addr,
 				len, dma_dir);
-	if (data_len > 0) {
-		/* refill descriptor with new buffer later */
-		skb_add_rx_frag(rxq->skb,
-				skb_shinfo(rxq->skb)->nr_frags,
-				page, pp->rx_offset_correction, data_len,
-				PAGE_SIZE);
-	}
-	page_pool_release_page(rxq->page_pool, page);
-	rx_desc->buf_phys_addr = 0;
+
+	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+
+		skb_frag_off_set(frag, pp->rx_offset_correction);
+		skb_frag_size_set(frag, data_len);
+		__skb_frag_set_page(frag, page);
+		sinfo->nr_frags++;
+
+		rx_desc->buf_phys_addr = 0;
+	}
 	rxq->left_size -= len;
 }
 
+static struct sk_buff *
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+		      struct xdp_buff *xdp, u32 desc_status)
+{
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+	int i, num_frags = sinfo->nr_frags;
+	skb_frag_t frags[MAX_SKB_FRAGS];
+	struct sk_buff *skb;
+
+	memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
+
+	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	skb_put(skb, xdp->data_end - xdp->data);
+	mvneta_rx_csum(pp, desc_status, skb);
+
+	for (i = 0; i < num_frags; i++) {
+		struct page *page = skb_frag_page(&frags[i]);
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				page, skb_frag_off(&frags[i]),
+				skb_frag_size(&frags[i]), PAGE_SIZE);
+		page_pool_release_page(rxq->page_pool, page);
+	}
+
+	return skb;
+}
+
 /* Main rx processing when using software buffer management */
 static int mvneta_rx_swbm(struct napi_struct *napi,
 			  struct mvneta_port *pp, int budget,
 			  struct mvneta_rx_queue *rxq)
 {
 	int rx_proc = 0, rx_todo, refill;
 	struct net_device *dev = pp->dev;
+	struct xdp_buff xdp_buf = {
+		.frame_sz = PAGE_SIZE,
+		.rxq = &rxq->xdp_rxq,
+	};
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
-	struct xdp_buff xdp_buf;
+	u32 desc_status;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(pp->xdp_prog);
-	xdp_buf.rxq = &rxq->xdp_rxq;
-	xdp_buf.frame_sz = PAGE_SIZE;
 
 	/* Fairness NAPI loop */
 	while (rx_proc < budget && rx_proc < rx_todo) {
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
 		u32 rx_status, index;
+		struct sk_buff *skb;
 		struct page *page;
 
 		index = rx_desc - rxq->descs;
@@ -2357,49 +2390,60 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			/* Check errors only for FIRST descriptor */
 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 				mvneta_rx_error(pp, rx_desc);
-				/* leave the descriptor untouched */
-				continue;
+				goto next;
 			}
 
 			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
 						   xdp_prog, page, &ps);
 			if (err)
 				continue;
+
+			desc_status = rx_desc->status;
 		} else {
-			if (unlikely(!rxq->skb)) {
-				pr_debug("no skb for rx_status 0x%x\n",
-					 rx_status);
+			if (unlikely(!xdp_buf.data_hard_start))
 				continue;
-			}
-			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
+
+			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
+						    page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
 			/* no last descriptor this time */
 			continue;
 
 		if (rxq->left_size) {
-			pr_err("get last desc, but left_size (%d) != 0\n",
-			       rxq->left_size);
-			dev_kfree_skb_any(rxq->skb);
 			rxq->left_size = 0;
-			rxq->skb = NULL;
-			continue;
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+			goto next;
 		}
 
-		ps.rx_bytes += rxq->skb->len;
-		ps.rx_packets++;
+		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+		if (IS_ERR(skb)) {
+			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-		/* Linux processing */
-		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+
+			u64_stats_update_begin(&stats->syncp);
+			stats->es.skb_alloc_error++;
+			stats->rx_dropped++;
+			u64_stats_update_end(&stats->syncp);
+
+			goto next;
+		}
 
-		napi_gro_receive(napi, rxq->skb);
+		ps.rx_bytes += skb->len;
+		ps.rx_packets++;
 
-		/* clean uncomplete skb pointer in queue */
-		rxq->skb = NULL;
+		skb->protocol = eth_type_trans(skb, dev);
+		napi_gro_receive(napi, skb);
+next:
+		xdp_buf.data_hard_start = NULL;
 	}
 	rcu_read_unlock();
 
+	if (xdp_buf.data_hard_start)
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+
 	if (ps.xdp_redirect)
 		xdp_do_flush_map();
 
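Two details of the new code are worth calling out (editorial notes, not part
of the patch). First, mvneta_swbm_build_skb() copies sinfo->frags to a stack
array before calling build_skb(): the skb_shared_info returned by
xdp_get_shared_info_from_buff() lives in the tail of the same head buffer
that build_skb() takes over, and build_skb() reinitializes that area for the
new skb, which would clobber the stashed frags. Second, the drop paths call
mvneta_xdp_put_buff() with sync_len == -1, which page_pool treats as "DMA-sync
the entire buffer" before recycling it; napi == true selects the lockless
in-softirq recycle path. A sketch of the head-buffer layout assumed by the
first point:

	/* One PAGE_SIZE buffer handed to build_skb():
	 *
	 * data_hard_start   data          data_end       buffer end
	 * |<- headroom ->|<---- frame ---->|<--- tail --->|
	 *                                   \__ skb_shared_info lives here;
	 *                                       the frags stashed by
	 *                                       mvneta_swbm_add_rx_fragment()
	 *                                       are overwritten when build_skb()
	 *                                       sets up the new skb, hence the
	 *                                       memcpy() to the stack first.
	 */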
