Skip to content

Commit a57a498

Browse files
JoonsooKim authored and penberg committed
slab: use __GFP_COMP flag for allocating slab pages
If we use 'struct page' of first page as 'struct slab', there is no advantage not to use __GFP_COMP. So use __GFP_COMP flag for all the cases.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
1 parent 56f295e commit a57a498

File tree

1 file changed

+9
-34
lines changed

1 file changed

+9
-34
lines changed

mm/slab.c

Lines changed: 9 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1718,15 +1718,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
17181718
{
17191719
struct page *page;
17201720
int nr_pages;
1721-
int i;
1722-
1723-
#ifndef CONFIG_MMU
1724-
/*
1725-
* Nommu uses slab's for process anonymous memory allocations, and thus
1726-
* requires __GFP_COMP to properly refcount higher order allocations
1727-
*/
1728-
flags |= __GFP_COMP;
1729-
#endif
17301721

17311722
flags |= cachep->allocflags;
17321723
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1750,12 +1741,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
17501741
else
17511742
add_zone_page_state(page_zone(page),
17521743
NR_SLAB_UNRECLAIMABLE, nr_pages);
1753-
for (i = 0; i < nr_pages; i++) {
1754-
__SetPageSlab(page + i);
1755-
1756-
if (page->pfmemalloc)
1757-
SetPageSlabPfmemalloc(page);
1758-
}
1744+
__SetPageSlab(page);
1745+
if (page->pfmemalloc)
1746+
SetPageSlabPfmemalloc(page);
17591747
memcg_bind_pages(cachep, cachep->gfporder);
17601748

17611749
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1775,8 +1763,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
17751763
*/
17761764
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
17771765
{
1778-
unsigned long i = (1 << cachep->gfporder);
1779-
const unsigned long nr_freed = i;
1766+
const unsigned long nr_freed = (1 << cachep->gfporder);
17801767

17811768
kmemcheck_free_shadow(page, cachep->gfporder);
17821769

@@ -1787,12 +1774,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
17871774
sub_zone_page_state(page_zone(page),
17881775
NR_SLAB_UNRECLAIMABLE, nr_freed);
17891776

1777+
BUG_ON(!PageSlab(page));
17901778
__ClearPageSlabPfmemalloc(page);
1791-
while (i--) {
1792-
BUG_ON(!PageSlab(page));
1793-
__ClearPageSlab(page);
1794-
page++;
1795-
}
1779+
__ClearPageSlab(page);
17961780

17971781
memcg_release_pages(cachep, cachep->gfporder);
17981782
if (current->reclaim_state)
@@ -2362,7 +2346,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
23622346
cachep->colour = left_over / cachep->colour_off;
23632347
cachep->slab_size = slab_size;
23642348
cachep->flags = flags;
2365-
cachep->allocflags = 0;
2349+
cachep->allocflags = __GFP_COMP;
23662350
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
23672351
cachep->allocflags |= GFP_DMA;
23682352
cachep->size = size;
@@ -2729,17 +2713,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
27292713
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
27302714
struct page *page)
27312715
{
2732-
int nr_pages;
2733-
2734-
nr_pages = 1;
2735-
if (likely(!PageCompound(page)))
2736-
nr_pages <<= cache->gfporder;
2737-
2738-
do {
2739-
page->slab_cache = cache;
2740-
page->slab_page = slab;
2741-
page++;
2742-
} while (--nr_pages);
2716+
page->slab_cache = cache;
2717+
page->slab_page = slab;
27432718
}
27442719

27452720
/*

0 commit comments

Comments (0)