mm: Fix struct page layout on 32-bit systems
32-bit architectures which expect 8-byte alignment for 8-byte integers
and need 64-bit DMA addresses (arc, arm, mips, ppc) had their struct
page inadvertently expanded in 2019.  When dma_addr_t was added, it
forced the alignment of the union to 8 bytes, which inserted a 4-byte
gap between 'flags' and the union.
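
A minimal userspace sketch of that gap (illustrative only: page_before
and page_after are hypothetical stand-ins for struct page, assuming an
ILP32 ABI such as 32-bit arm EABI where unsigned long is 4 bytes and
64-bit integers require 8-byte alignment):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct page_before {		/* layout with a 64-bit dma_addr_t */
	unsigned long flags;	/* 4 bytes on ILP32 */
	union {
		uint64_t dma_addr;	/* forces 8-byte union alignment */
		unsigned long words[5];
	};
};

struct page_after {		/* layout with the fix applied */
	unsigned long flags;
	union {
		unsigned long dma_addr[2];	/* 4-byte alignment only */
		unsigned long words[5];
	};
};

int main(void)
{
	/* On the assumed ABI this prints 8 and 4: the uint64_t member
	 * inserts a 4-byte gap between flags and the union. */
	printf("before: union at offset %zu\n",
	       offsetof(struct page_before, dma_addr));
	printf("after:  union at offset %zu\n",
	       offsetof(struct page_after, dma_addr));
	return 0;
}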

Fix this by storing the dma_addr_t in one or two adjacent unsigned longs.
This restores the alignment to that of an unsigned long.  It also fixes
a potential problem where, on a big-endian platform, the bit used to
denote PageTail could inadvertently be set, and a racing
get_user_pages_fast() could dereference a bogus compound_head().
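
A sketch of why that bit is at risk (my illustration, not part of the
patch): with the old layout, the 64-bit dma_addr occupies the same
union words as compound_head, and on a big-endian 32-bit machine the
high word is stored first, so bit 32 of a DMA address lands on the bit
that PageTail() tests:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A page-aligned DMA address just above 4GiB: low bits clear,
	 * but bit 32 set. */
	uint64_t dma = 0x100000000ULL;

	/* On big-endian, the union word overlaying compound_head holds
	 * the high 32 bits of dma_addr, so its bit 0 is dma bit 32. */
	unsigned long aliased_compound_head = (unsigned long)(dma >> 32);

	printf("PageTail bit seen by a racing reader: %lu\n",
	       aliased_compound_head & 1);	/* prints 1 */
	return 0;
}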

Fixes: c25fff7 ("mm: add dma_addr_t to struct page")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Matthew Wilcox (Oracle) authored and intel-lab-lkp committed Apr 16, 2021
1 parent 5e46d1b commit 898e155048088be20b2606575a24108eacc4c91b
Showing 3 changed files with 20 additions and 8 deletions.
include/linux/mm_types.h

@@ -97,10 +97,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
			/**
-			 * @dma_addr: might require a 64-bit value even on
+			 * @dma_addr: might require a 64-bit value on
			 * 32-bit architectures.
			 */
-			dma_addr_t dma_addr;
+			unsigned long dma_addr[2];
 		};
 		struct {	/* slab, slob and slub */
 			union {
include/net/page_pool.h

@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 32;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = addr >> 32;
 }
 
 static inline bool is_page_pool_compiled_in(void)
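
These two helpers round-trip any 64-bit address through the pair of
words; on 64-bit builds the sizeof test is false at compile time, so
the second word is never touched.  A standalone sketch of the same
technique (the dma_addr_t typedef and dma_words array are local
stand-ins here, not the kernel's):

#include <stdint.h>
#include <assert.h>

typedef uint64_t dma_addr_t;		/* stand-in typedef */

static unsigned long dma_words[2];	/* stand-in for page->dma_addr */

static void set_dma(dma_addr_t addr)
{
	dma_words[0] = addr;		/* low bits */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		dma_words[1] = addr >> 32;	/* high bits, 32-bit only */
}

static dma_addr_t get_dma(void)
{
	dma_addr_t ret = dma_words[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)dma_words[1] << 32;
	return ret;
}

int main(void)
{
	set_dma(0x123456789ULL);	/* needs more than 32 bits on ILP32 */
	assert(get_dma() == 0x123456789ULL);
	return 0;
}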
net/core/page_pool.c

@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		put_page(page);
 		return NULL;
 	}
-	page->dma_addr = dma;
+	page_pool_set_dma_addr(page, dma);
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;
 
-	dma = page->dma_addr;
+	dma = page_pool_get_dma_addr(page);
 
-	/* When page is unmapped, it cannot be returned our pool */
+	/* When page is unmapped, it cannot be returned to our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page->dma_addr = 0;
+	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
