
Commit 248f657

leitao authored and kuba-moo committed
netpoll: Optimize skb refilling on critical path
netpoll tries to refill the skb queue on every packet send, regardless of whether packets are being consumed from the pool or not. This was particularly problematic while being called from printk(), where the operation would be done while holding the console lock.

Introduce a more intelligent approach to skb queue management. Instead of constantly attempting to refill the queue, the system now defers refilling to a work queue and only triggers the workqueue when a buffer is actually dequeued. This change significantly reduces operations with the lock held.

Add a work_struct to the netpoll structure for asynchronous refilling, and update find_skb() to schedule refill work only when necessary (an skb is dequeued).

These changes have demonstrated a 15% reduction in time spent during netpoll_send_msg operations, especially when no SKBs are consumed from the pool. When SKBs are being dequeued, the improvement is even better, around 70%, mainly because refilling the SKB pool now happens outside of the critical path (with the console_owner lock held).

Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250304-netpoll_refill_v2-v1-1-06e2916a4642@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
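For readers unfamiliar with the pattern, here is a minimal self-contained sketch of the same defer-to-workqueue idea. It is not part of the patch; all names (demo_ctx, demo_take, demo_refill_worker, POOL_TARGET, DEMO_SKB_SIZE) are hypothetical, and the pool depth and buffer size are assumed values.

/*
 * Hypothetical sketch of the deferred-refill pattern; not part of
 * this patch. The fast path only dequeues and kicks a work item;
 * all allocation happens later in process context, outside any
 * caller-held lock.
 */
#include <linux/skbuff.h>
#include <linux/workqueue.h>

#define POOL_TARGET     32      /* assumed pool depth */
#define DEMO_SKB_SIZE   1500    /* assumed buffer size */

struct demo_ctx {
        struct sk_buff_head pool;
        struct work_struct refill_work;
};

/* Slow path: runs in process context via the system workqueue, so
 * the allocations never happen under e.g. printk()'s console lock. */
static void demo_refill_worker(struct work_struct *work)
{
        struct demo_ctx *ctx = container_of(work, struct demo_ctx,
                                            refill_work);
        struct sk_buff *skb;

        while (skb_queue_len(&ctx->pool) < POOL_TARGET) {
                skb = alloc_skb(DEMO_SKB_SIZE, GFP_KERNEL);
                if (!skb)
                        break;
                skb_queue_tail(&ctx->pool, skb);
        }
}

/* Fast path: schedule a refill only when a buffer is actually taken
 * from the pool, mirroring the find_skb() change in the diff below. */
static struct sk_buff *demo_take(struct demo_ctx *ctx)
{
        struct sk_buff *skb = skb_dequeue(&ctx->pool);

        if (skb)
                schedule_work(&ctx->refill_work);
        return skb;
}

static void demo_init(struct demo_ctx *ctx)
{
        skb_queue_head_init(&ctx->pool);
        INIT_WORK(&ctx->refill_work, demo_refill_worker);
        schedule_work(&ctx->refill_work);       /* prime the pool */
}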
1 parent fca9fe1 commit 248f657

2 files changed: 14 additions (+14), 2 deletions (-2)


include/linux/netpoll.h

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@ struct netpoll {
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
 	struct sk_buff_head skb_pool;
+	struct work_struct refill_wq;
 };

 struct netpoll_info {
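A note on naming: despite the "_wq" suffix, refill_wq is a work item (struct work_struct), not a dedicated workqueue; the schedule_work() call in the diff below queues it on the kernel's shared system workqueue.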

net/core/netpoll.c

Lines changed: 13 additions & 2 deletions
@@ -284,12 +284,13 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	struct sk_buff *skb;

 	zap_completion_queue();
-	refill_skbs(np);
 repeat:

 	skb = alloc_skb(len, GFP_ATOMIC);
-	if (!skb)
+	if (!skb) {
 		skb = skb_dequeue(&np->skb_pool);
+		schedule_work(&np->refill_wq);
+	}

 	if (!skb) {
 		if (++count < 10) {
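With this hunk applied, the fast path of find_skb() reads roughly as follows. The lines outside the hunk (the retry-loop body and the refcount/reserve tail) are reconstructed from the surrounding file for context and may differ in detail:

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                /* Fall back to the pre-filled pool; the work item
                 * tops it back up outside the critical path. */
                skb = skb_dequeue(&np->skb_pool);
                schedule_work(&np->refill_wq);
        }

        if (!skb) {
                /* Pool empty too: poll the device and retry a few
                 * times before giving up. */
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}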
@@ -535,6 +536,7 @@ static void skb_pool_flush(struct netpoll *np)
 {
 	struct sk_buff_head *skb_pool;

+	cancel_work_sync(&np->refill_wq);
 	skb_pool = &np->skb_pool;
 	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
 }
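The ordering here matters: cancel_work_sync() waits for any in-flight refill to finish before the pool is purged, so a racing work item cannot repopulate the queue after skb_queue_purge_reason() and leak buffers on teardown.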
@@ -621,6 +623,14 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);

+static void refill_skbs_work_handler(struct work_struct *work)
+{
+	struct netpoll *np =
+		container_of(work, struct netpoll, refill_wq);
+
+	refill_skbs(np);
+}
+
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
@@ -666,6 +676,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)

 	/* fill up the skb queue */
 	refill_skbs(np);
+	INIT_WORK(&np->refill_wq, refill_skbs_work_handler);

 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
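Ordering matters on the setup side as well: the pool is still filled synchronously once, and INIT_WORK() runs before rcu_assign_pointer() publishes npinfo, so the work item is initialized before find_skb() can ever schedule it.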
