net: page_pool: split page_pool.h
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Yunsheng Lin committed Jul 16, 2023
1 parent 8595eaa commit 11ac8c1
Showing 3 changed files with 222 additions and 213 deletions.
2 changes: 1 addition & 1 deletion include/linux/skbuff.h
@@ -32,7 +32,7 @@
 #include <linux/if_packet.h>
 #include <linux/llist.h>
 #include <net/flow.h>
-#include <net/page_pool.h>
+#include <net/page_pool_types.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
214 changes: 2 additions & 212 deletions include/net/page_pool.h
@@ -30,187 +30,7 @@
 #ifndef _NET_PAGE_POOL_H
 #define _NET_PAGE_POOL_H
 
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
-					* map/unmap
-					*/
-#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
-					* from page_pool will be
-					* DMA-synced-for-device according to
-					* the length provided by the device
-					* driver.
-					* Please note DMA-sync-for-CPU is still
-					* device driver responsibility
-					*/
-#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
-#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
-				 PP_FLAG_DMA_SYNC_DEV |\
-				 PP_FLAG_PAGE_FRAG)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark are related to the network
- * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural maximum size of objects needed in the cache.
- *
- * Keeping room for more objects is due to the XDP_DROP use-case, as
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, since it shares the same softirq/NAPI protection. If
- * the cache is already full (or partly full) then the XDP_DROP
- * recycles would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE	128
-#define PP_ALLOC_CACHE_REFILL	64
-struct pp_alloc_cache {
-	u32 count;
-	struct page *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-struct page_pool_params {
-	unsigned int	flags;
-	unsigned int	order;
-	unsigned int	pool_size;
-	int		nid;  /* NUMA node id to allocate pages from */
-	struct device	*dev; /* device, for DMA pre-mapping purposes */
-	struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
-	enum dma_data_direction dma_dir; /* DMA mapping direction */
-	unsigned int	max_len; /* max DMA sync memory size */
-	unsigned int	offset;  /* DMA addr offset */
-	void (*init_callback)(struct page *page, void *arg);
-	void *init_arg;
-};
-
-#ifdef CONFIG_PAGE_POOL_STATS
-struct page_pool_alloc_stats {
-	u64 fast; /* fast path allocations */
-	u64 slow; /* slow-path order 0 allocations */
-	u64 slow_high_order; /* slow-path high order allocations */
-	u64 empty; /* failed refills due to empty ptr ring, forcing
-		    * slow path allocation
-		    */
-	u64 refill; /* allocations via successful refill */
-	u64 waive;  /* failed refills due to numa zone mismatch */
-};
-
-struct page_pool_recycle_stats {
-	u64 cached;	/* recycling placed page in the cache. */
-	u64 cache_full; /* cache was full */
-	u64 ring;	/* recycling placed page back into ptr ring */
-	u64 ring_full;	/* page was released from page-pool because
-			 * PTR ring was full.
-			 */
-	u64 released_refcnt; /* page released because of elevated
-			      * refcnt
-			      */
-};
-
-/* This struct wraps the above stats structs so users of the
- * page_pool_get_stats API can pass a single argument when requesting the
- * stats for the page pool.
- */
-struct page_pool_stats {
-	struct page_pool_alloc_stats alloc_stats;
-	struct page_pool_recycle_stats recycle_stats;
-};
-
-int page_pool_ethtool_stats_get_count(void);
-u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
-
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats and call page_pool_get_stats to get stats for
- * the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
-			 struct page_pool_stats *stats);
-#else
-
-static inline int page_pool_ethtool_stats_get_count(void)
-{
-	return 0;
-}
-
-static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
-{
-	return data;
-}
-
-static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
-{
-	return data;
-}
-
-#endif
-
-struct page_pool {
-	struct page_pool_params p;
-
-	struct delayed_work release_dw;
-	void (*disconnect)(void *);
-	unsigned long defer_start;
-	unsigned long defer_warn;
-
-	u32 pages_state_hold_cnt;
-	unsigned int frag_offset;
-	struct page *frag_page;
-	long frag_users;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-	/* these stats are incremented while in softirq context */
-	struct page_pool_alloc_stats alloc_stats;
-#endif
-	u32 xdp_mem_id;
-
-	/*
-	 * Data structure for allocation side
-	 *
-	 * The driver's allocation side usually already performs some
-	 * kind of resource protection. Piggyback on this protection,
-	 * and require the driver to protect the allocation side.
-	 *
-	 * For NIC drivers this means allocating a page_pool per
-	 * RX-queue, as the RX-queue is already protected by
-	 * softirq/BH scheduling and napi_schedule. NAPI scheduling
-	 * guarantees that a single napi_struct will only be scheduled
-	 * on a single CPU (see napi_schedule).
-	 */
-	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
-	/* Data structure for storing recycled pages.
-	 *
-	 * Returning/freeing pages is more complicated synchronization-wise,
-	 * because frees can happen on remote CPUs, with no association
-	 * with the allocation resource.
-	 *
-	 * Use ptr_ring, as it separates consumer and producer
-	 * efficiently, in a way that doesn't bounce cache-lines.
-	 *
-	 * TODO: Implement bulk return pages into this structure.
-	 */
-	struct ptr_ring ring;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-	/* recycle stats are per-cpu to avoid locking */
-	struct page_pool_recycle_stats __percpu *recycle_stats;
-#endif
-	atomic_t pages_state_release_cnt;
-
-	/* A page_pool is strictly tied to a single RX-queue being
-	 * protected by NAPI, due to the above pp_alloc_cache. This
-	 * refcnt serves to simplify drivers' error handling.
-	 */
-	refcount_t user_cnt;
-
-	u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+#include <net/page_pool_types.h>
 
 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 {
@@ -219,9 +39,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 	return page_pool_alloc_pages(pool, gfp);
 }
 
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
-				  unsigned int size, gfp_t gfp);
-
 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
 						    unsigned int *offset,
 						    unsigned int size)
@@ -240,21 +57,9 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
 	return pool->p.dma_dir;
 }
 
-bool page_pool_return_skb_page(struct page *page, bool napi_safe);
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-struct xdp_mem_info;
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-			   struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-			     int count);
-#else
+#ifndef CONFIG_PAGE_POOL
 static inline void page_pool_unlink_napi(struct page_pool *pool)
 {
 }
@@ -279,10 +84,6 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 #endif
 
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
-				  unsigned int dma_sync_size,
-				  bool allow_direct);
-
 /* pp_frag_count represents the number of writers who can update the page
  * either by updating skb->data or via DMA mappings for the device.
  * We can't rely on the page refcnt for that as we don't know who might be
@@ -377,22 +178,11 @@ static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 		page->dma_addr_upper = upper_32_bits(addr);
 }
 
-static inline bool is_page_pool_compiled_in(void)
-{
-#ifdef CONFIG_PAGE_POOL
-	return true;
-#else
-	return false;
-#endif
-}
-
 static inline bool page_pool_put(struct page_pool *pool)
 {
 	return refcount_dec_and_test(&pool->user_cnt);
 }
 
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 {
 	if (unlikely(pool->p.nid != new_nid))
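The third changed file, the new include/net/page_pool_types.h (219 additions, by subtraction from the totals above), is not rendered on this page. For orientation, here is a minimal, hypothetical driver-style sketch of the allocation-side API whose declarations this commit moves; the function names, pool_size, and other parameter values are illustrative assumptions, not code from the patch:

/* Sketch: create a page_pool for one RX queue and refill one descriptor.
 * Assumes CONFIG_PAGE_POOL; rx_pool_setup() and rx_refill_one() are
 * made-up names, and real error handling is omitted.
 */
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page_pool *rx_pool_setup(struct device *dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,		/* order-0 (single) pages */
		.pool_size	= 256,		/* size of the recycle ptr_ring */
		.nid		= nid,		/* NUMA node of this RX queue */
		.dev		= dev,		/* used for DMA pre-mapping */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* DMA-sync-for-device length */
		.offset		= 0,		/* no headroom before data */
	};

	/* Returns ERR_PTR() on failure */
	return page_pool_create(&pp_params);
}

static int rx_refill_one(struct page_pool *pool)
{
	/* Inline wrapper around page_pool_alloc_pages() with an atomic gfp */
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;

	/* ... post the page's DMA address to the RX descriptor ring ... */
	return 0;
}

page_pool_destroy(), declared under CONFIG_PAGE_POOL above, is the matching teardown once the driver has returned its pages.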
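Where sub-page buffers suffice, the frag API touched above can hand out several fragments of a single page, tracked via the pp_frag_count mechanism whose comment survives in this header. A sketch under the same assumptions (the function name and the 1024-byte size are made up; the pool must have been created with PP_FLAG_PAGE_FRAG):

/* Sketch: allocate a 1024-byte fragment from a PP_FLAG_PAGE_FRAG pool.
 * On success *offset is set to the fragment's byte offset inside the
 * returned page; NULL means no page could be allocated or recycled.
 */
static struct page *rx_alloc_frag(struct page_pool *pool,
				  unsigned int *offset)
{
	return page_pool_dev_alloc_frag(pool, offset, 1024);
}

When the last fragment reference is dropped, the put path hands the page to page_pool_put_defragged_page(), declared above.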

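Finally, a hedged sketch of how a driver might wire the CONFIG_PAGE_POOL_STATS helpers declared above into its ethtool stats path (rx_pool_fill_stats() is a made-up name; a real driver would also reserve page_pool_ethtool_stats_get_count() slots and emit the matching strings from its get_sset_count/get_strings hooks):

#ifdef CONFIG_PAGE_POOL_STATS
/* Sketch: append one pool's counters to an ethtool stats buffer.
 * The u64 layout written here matches the strings emitted by
 * page_pool_ethtool_stats_get_strings().
 */
static u64 *rx_pool_fill_stats(struct page_pool *pool, u64 *data)
{
	struct page_pool_stats stats = { };

	/* Returns false (nothing gathered) if the pool or stats is NULL */
	if (page_pool_get_stats(pool, &stats))
		data = page_pool_ethtool_stats_get(data, &stats);

	return data;
}
#endif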