Skip to content

Commit 4dec64c

Browse files
minakuba-moo
authored and committed
page_pool: convert to use netmem
Abstract the memory type from the page_pool so we can later add support for new memory types. Convert the page_pool to use the new netmem type abstraction, rather than use struct page directly. As of this patch the netmem type is a no-op abstraction: it's always a struct page underneath. All the page pool internals are converted to use struct netmem instead of struct page, and the page pool now exports 2 APIs: 1. The existing struct page API. 2. The new struct netmem API. Keeping the existing API is transitional; we do not want to refactor all the current drivers using the page pool at once. The netmem abstraction is currently a no-op. The page_pool uses page_to_netmem() to convert allocated pages to netmem, and uses netmem_to_page() to convert the netmem back to pages to pass to mm APIs. Follow up patches to this series add non-paged netmem support to the page_pool. This change is factored out on its own to limit the code churn to this 1 patch, for ease of code review. Signed-off-by: Mina Almasry <almasrymina@google.com> Reviewed-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://patch.msgid.link/20240628003253.1694510-6-almasrymina@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent ac26327 commit 4dec64c

File tree

8 files changed

+287
-184
lines changed

8 files changed

+287
-184
lines changed

include/linux/skbuff_ref.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,13 +32,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
3232
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3333
}
3434

35-
bool napi_pp_put_page(struct page *page);
35+
bool napi_pp_put_page(netmem_ref netmem);
3636

3737
static inline void
3838
skb_page_unref(struct page *page, bool recycle)
3939
{
4040
#ifdef CONFIG_PAGE_POOL
41-
if (recycle && napi_pp_put_page(page))
41+
if (recycle && napi_pp_put_page(page_to_netmem(page)))
4242
return;
4343
#endif
4444
put_page(page);

include/net/netmem.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,19 @@ static inline netmem_ref page_to_netmem(struct page *page)
3838
return (__force netmem_ref)page;
3939
}
4040

41+
static inline int netmem_ref_count(netmem_ref netmem)
42+
{
43+
return page_ref_count(netmem_to_page(netmem));
44+
}
45+
46+
static inline unsigned long netmem_to_pfn(netmem_ref netmem)
47+
{
48+
return page_to_pfn(netmem_to_page(netmem));
49+
}
50+
51+
static inline netmem_ref netmem_compound_head(netmem_ref netmem)
52+
{
53+
return page_to_netmem(compound_head(netmem_to_page(netmem)));
54+
}
55+
4156
#endif /* _NET_NETMEM_H */

include/net/page_pool/helpers.h

Lines changed: 70 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,8 @@
5555
#include <linux/dma-mapping.h>
5656

5757
#include <net/page_pool/types.h>
58+
#include <net/net_debug.h>
59+
#include <net/netmem.h>
5860

5961
#ifdef CONFIG_PAGE_POOL_STATS
6062
/* Deprecated driver-facing API, use netlink instead */
@@ -212,6 +214,11 @@ page_pool_get_dma_dir(const struct page_pool *pool)
212214
return pool->p.dma_dir;
213215
}
214216

217+
static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
218+
{
219+
atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
220+
}
221+
215222
/**
216223
* page_pool_fragment_page() - split a fresh page into fragments
217224
* @page: page to split
@@ -232,11 +239,12 @@ page_pool_get_dma_dir(const struct page_pool *pool)
232239
*/
233240
static inline void page_pool_fragment_page(struct page *page, long nr)
234241
{
235-
atomic_long_set(&page->pp_ref_count, nr);
242+
page_pool_fragment_netmem(page_to_netmem(page), nr);
236243
}
237244

238-
static inline long page_pool_unref_page(struct page *page, long nr)
245+
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
239246
{
247+
struct page *page = netmem_to_page(netmem);
240248
long ret;
241249

242250
/* If nr == pp_ref_count then we have cleared all remaining
@@ -279,15 +287,41 @@ static inline long page_pool_unref_page(struct page *page, long nr)
279287
return ret;
280288
}
281289

290+
static inline long page_pool_unref_page(struct page *page, long nr)
291+
{
292+
return page_pool_unref_netmem(page_to_netmem(page), nr);
293+
}
294+
295+
static inline void page_pool_ref_netmem(netmem_ref netmem)
296+
{
297+
atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
298+
}
299+
282300
static inline void page_pool_ref_page(struct page *page)
283301
{
284-
atomic_long_inc(&page->pp_ref_count);
302+
page_pool_ref_netmem(page_to_netmem(page));
285303
}
286304

287-
static inline bool page_pool_is_last_ref(struct page *page)
305+
static inline bool page_pool_is_last_ref(netmem_ref netmem)
288306
{
289307
/* If page_pool_unref_page() returns 0, we were the last user */
290-
return page_pool_unref_page(page, 1) == 0;
308+
return page_pool_unref_netmem(netmem, 1) == 0;
309+
}
310+
311+
static inline void page_pool_put_netmem(struct page_pool *pool,
312+
netmem_ref netmem,
313+
unsigned int dma_sync_size,
314+
bool allow_direct)
315+
{
316+
/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
317+
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
318+
*/
319+
#ifdef CONFIG_PAGE_POOL
320+
if (!page_pool_is_last_ref(netmem))
321+
return;
322+
323+
page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
324+
#endif
291325
}
292326

293327
/**
@@ -308,15 +342,15 @@ static inline void page_pool_put_page(struct page_pool *pool,
308342
unsigned int dma_sync_size,
309343
bool allow_direct)
310344
{
311-
/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
312-
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
313-
*/
314-
#ifdef CONFIG_PAGE_POOL
315-
if (!page_pool_is_last_ref(page))
316-
return;
345+
page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
346+
allow_direct);
347+
}
317348

318-
page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
319-
#endif
349+
static inline void page_pool_put_full_netmem(struct page_pool *pool,
350+
netmem_ref netmem,
351+
bool allow_direct)
352+
{
353+
page_pool_put_netmem(pool, netmem, -1, allow_direct);
320354
}
321355

322356
/**
@@ -331,7 +365,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
331365
static inline void page_pool_put_full_page(struct page_pool *pool,
332366
struct page *page, bool allow_direct)
333367
{
334-
page_pool_put_page(pool, page, -1, allow_direct);
368+
page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
335369
}
336370

337371
/**
@@ -365,6 +399,18 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
365399
page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
366400
}
367401

402+
static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
403+
{
404+
struct page *page = netmem_to_page(netmem);
405+
406+
dma_addr_t ret = page->dma_addr;
407+
408+
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
409+
ret <<= PAGE_SHIFT;
410+
411+
return ret;
412+
}
413+
368414
/**
369415
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
370416
* @page: page allocated from a page pool
@@ -374,16 +420,14 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
374420
*/
375421
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
376422
{
377-
dma_addr_t ret = page->dma_addr;
378-
379-
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
380-
ret <<= PAGE_SHIFT;
381-
382-
return ret;
423+
return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
383424
}
384425

385-
static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
426+
static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
427+
dma_addr_t addr)
386428
{
429+
struct page *page = netmem_to_page(netmem);
430+
387431
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
388432
page->dma_addr = addr >> PAGE_SHIFT;
389433

@@ -419,6 +463,11 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
419463
page_pool_get_dma_dir(pool));
420464
}
421465

466+
static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
467+
{
468+
return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
469+
}
470+
422471
static inline bool page_pool_put(struct page_pool *pool)
423472
{
424473
return refcount_dec_and_test(&pool->user_cnt);

include/net/page_pool/types.h

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include <linux/dma-direction.h>
77
#include <linux/ptr_ring.h>
88
#include <linux/types.h>
9+
#include <net/netmem.h>
910

1011
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
1112
* map/unmap
@@ -40,7 +41,7 @@
4041
#define PP_ALLOC_CACHE_REFILL 64
4142
struct pp_alloc_cache {
4243
u32 count;
43-
struct page *cache[PP_ALLOC_CACHE_SIZE];
44+
netmem_ref cache[PP_ALLOC_CACHE_SIZE];
4445
};
4546

4647
/**
@@ -73,7 +74,7 @@ struct page_pool_params {
7374
struct net_device *netdev;
7475
unsigned int flags;
7576
/* private: used by test code only */
76-
void (*init_callback)(struct page *page, void *arg);
77+
void (*init_callback)(netmem_ref netmem, void *arg);
7778
void *init_arg;
7879
);
7980
};
@@ -151,7 +152,7 @@ struct page_pool {
151152
*/
152153
__cacheline_group_begin(frag) __aligned(4 * sizeof(long));
153154
long frag_users;
154-
struct page *frag_page;
155+
netmem_ref frag_page;
155156
unsigned int frag_offset;
156157
__cacheline_group_end(frag);
157158

@@ -220,8 +221,12 @@ struct page_pool {
220221
};
221222

222223
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
224+
netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
223225
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
224226
unsigned int size, gfp_t gfp);
227+
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
228+
unsigned int *offset, unsigned int size,
229+
gfp_t gfp);
225230
struct page_pool *page_pool_create(const struct page_pool_params *params);
226231
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
227232
int cpuid);
@@ -252,6 +257,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
252257
}
253258
#endif
254259

260+
void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
261+
unsigned int dma_sync_size,
262+
bool allow_direct);
255263
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
256264
unsigned int dma_sync_size,
257265
bool allow_direct);

include/trace/events/page_pool.h

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release,
4242
TRACE_EVENT(page_pool_state_release,
4343

4444
TP_PROTO(const struct page_pool *pool,
45-
const struct page *page, u32 release),
45+
netmem_ref netmem, u32 release),
4646

47-
TP_ARGS(pool, page, release),
47+
TP_ARGS(pool, netmem, release),
4848

4949
TP_STRUCT__entry(
5050
__field(const struct page_pool *, pool)
51-
__field(const struct page *, page)
51+
__field(unsigned long, netmem)
5252
__field(u32, release)
5353
__field(unsigned long, pfn)
5454
),
5555

5656
TP_fast_assign(
5757
__entry->pool = pool;
58-
__entry->page = page;
58+
__entry->netmem = (__force unsigned long)netmem;
5959
__entry->release = release;
60-
__entry->pfn = page_to_pfn(page);
60+
__entry->pfn = netmem_to_pfn(netmem);
6161
),
6262

63-
TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
64-
__entry->pool, __entry->page, __entry->pfn, __entry->release)
63+
TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
64+
__entry->pool, (void *)__entry->netmem,
65+
__entry->pfn, __entry->release)
6566
);
6667

6768
TRACE_EVENT(page_pool_state_hold,
6869

6970
TP_PROTO(const struct page_pool *pool,
70-
const struct page *page, u32 hold),
71+
netmem_ref netmem, u32 hold),
7172

72-
TP_ARGS(pool, page, hold),
73+
TP_ARGS(pool, netmem, hold),
7374

7475
TP_STRUCT__entry(
7576
__field(const struct page_pool *, pool)
76-
__field(const struct page *, page)
77+
__field(unsigned long, netmem)
7778
__field(u32, hold)
7879
__field(unsigned long, pfn)
7980
),
8081

8182
TP_fast_assign(
8283
__entry->pool = pool;
83-
__entry->page = page;
84+
__entry->netmem = (__force unsigned long)netmem;
8485
__entry->hold = hold;
85-
__entry->pfn = page_to_pfn(page);
86+
__entry->pfn = netmem_to_pfn(netmem);
8687
),
8788

88-
TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
89-
__entry->pool, __entry->page, __entry->pfn, __entry->hold)
89+
TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
90+
__entry->pool, (void *)__entry->netmem,
91+
__entry->pfn, __entry->hold)
9092
);
9193

9294
TRACE_EVENT(page_pool_update_nid,

net/bpf/test_run.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -127,9 +127,10 @@ struct xdp_test_data {
127127
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
128128
#define TEST_XDP_MAX_BATCH 256
129129

130-
static void xdp_test_run_init_page(struct page *page, void *arg)
130+
static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
131131
{
132-
struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
132+
struct xdp_page_head *head =
133+
phys_to_virt(page_to_phys(netmem_to_page(netmem)));
133134
struct xdp_buff *new_ctx, *orig_ctx;
134135
u32 headroom = XDP_PACKET_HEADROOM;
135136
struct xdp_test_data *xdp = arg;

0 commit comments

Comments
 (0)