Skip to content

Commit d85f338

Browse files
Christoph Lameter authored and
Linus Torvalds committed
Make page->private usable in compound pages
If we add a new flag so that we can distinguish between the first page and the tail pages then we can avoid using page->private in the first page. page->private == page for the first page, so there is no real information in there. Freeing up page->private makes the use of compound pages more transparent. They become more usable like real pages. Right now we have to be careful f.e. if we are going beyond PAGE_SIZE allocations in the slab on i386 because we can then no longer use the private field. This is one of the issues that cause us not to support debugging for page size slabs in SLAB. Having page->private available for SLUB would allow more meta information in the page struct. I can probably avoid the 16 bit ints that I have in there right now. Also if page->private is available then a compound page may be equipped with buffer heads. This may free up the way for filesystems to support larger blocks than page size. We add PageTail as an alias of PageReclaim. Compound pages cannot currently be reclaimed. Because of the alias one needs to check PageCompound first. The RFC for this approach was discussed at http://marc.info/?t=117574302800001&r=1&w=2 [nacc@us.ibm.com: fix hugetlbfs] Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 3052086 commit d85f338

File tree

9 files changed

+72
-37
lines changed

9 files changed

+72
-37
lines changed

arch/ia64/mm/init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ lazy_mmu_prot_update (pte_t pte)
121121
return; /* i-cache is already coherent with d-cache */
122122

123123
if (PageCompound(page)) {
124-
order = (unsigned long) (page[1].lru.prev);
124+
order = compound_order(page);
125125
flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
126126
}
127127
else

fs/hugetlbfs/inode.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -450,7 +450,7 @@ static int hugetlbfs_symlink(struct inode *dir,
450450
*/
451451
static int hugetlbfs_set_page_dirty(struct page *page)
452452
{
453-
struct page *head = (struct page *)page_private(page);
453+
struct page *head = compound_head(page);
454454

455455
SetPageDirty(head);
456456
return 0;

include/linux/mm.h

Lines changed: 28 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -267,17 +267,28 @@ static inline int get_page_unless_zero(struct page *page)
267267
return atomic_inc_not_zero(&page->_count);
268268
}
269269

270+
static inline struct page *compound_head(struct page *page)
271+
{
272+
/*
273+
* We could avoid the PageCompound(page) check if
274+
* we would not overload PageTail().
275+
*
276+
* This check has to be done in several performance critical
277+
* paths of the slab etc. IMHO PageTail deserves its own flag.
278+
*/
279+
if (unlikely(PageCompound(page) && PageTail(page)))
280+
return page->first_page;
281+
return page;
282+
}
283+
270284
static inline int page_count(struct page *page)
271285
{
272-
if (unlikely(PageCompound(page)))
273-
page = (struct page *)page_private(page);
274-
return atomic_read(&page->_count);
286+
return atomic_read(&compound_head(page)->_count);
275287
}
276288

277289
static inline void get_page(struct page *page)
278290
{
279-
if (unlikely(PageCompound(page)))
280-
page = (struct page *)page_private(page);
291+
page = compound_head(page);
281292
VM_BUG_ON(atomic_read(&page->_count) == 0);
282293
atomic_inc(&page->_count);
283294
}
@@ -314,6 +325,18 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
314325
return (compound_page_dtor *)page[1].lru.next;
315326
}
316327

328+
static inline int compound_order(struct page *page)
329+
{
330+
if (!PageCompound(page) || PageTail(page))
331+
return 0;
332+
return (unsigned long)page[1].lru.prev;
333+
}
334+
335+
static inline void set_compound_order(struct page *page, unsigned long order)
336+
{
337+
page[1].lru.prev = (void *)order;
338+
}
339+
317340
/*
318341
* Multiple processes may "see" the same page. E.g. for untouched
319342
* mappings of /dev/null, all processes see the same page full of

include/linux/page-flags.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,12 @@
9494
/* PG_owner_priv_1 users should have descriptive aliases */
9595
#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
9696

97+
/*
98+
* Marks tail portion of a compound page. We currently do not reclaim
99+
* compound pages so we can reuse a flag only used for reclaim here.
100+
*/
101+
#define PG_tail PG_reclaim
102+
97103
#if (BITS_PER_LONG > 32)
98104
/*
99105
* 64-bit-only flags build down from bit 31
@@ -241,6 +247,14 @@ static inline void SetPageUptodate(struct page *page)
241247
#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
242248
#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
243249

250+
/*
251+
* Note: PG_tail is an alias of another page flag. The result of PageTail()
252+
* is only valid if PageCompound(page) is true.
253+
*/
254+
#define PageTail(page) test_bit(PG_tail, &(page)->flags)
255+
#define __SetPageTail(page) __set_bit(PG_tail, &(page)->flags)
256+
#define __ClearPageTail(page) __clear_bit(PG_tail, &(page)->flags)
257+
244258
#ifdef CONFIG_SWAP
245259
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
246260
#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)

mm/internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
2424
*/
2525
static inline void set_page_refcounted(struct page *page)
2626
{
27-
VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
27+
VM_BUG_ON(PageCompound(page) && PageTail(page));
2828
VM_BUG_ON(atomic_read(&page->_count));
2929
set_page_count(page, 1);
3030
}

mm/page_alloc.c

Lines changed: 20 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ static void bad_page(struct page *page)
225225

226226
static void free_compound_page(struct page *page)
227227
{
228-
__free_pages_ok(page, (unsigned long)page[1].lru.prev);
228+
__free_pages_ok(page, compound_order(page));
229229
}
230230

231231
static void prep_compound_page(struct page *page, unsigned long order)
@@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
234234
int nr_pages = 1 << order;
235235

236236
set_compound_page_dtor(page, free_compound_page);
237-
page[1].lru.prev = (void *)order;
238-
for (i = 0; i < nr_pages; i++) {
237+
set_compound_order(page, order);
238+
__SetPageCompound(page);
239+
for (i = 1; i < nr_pages; i++) {
239240
struct page *p = page + i;
240241

242+
__SetPageTail(p);
241243
__SetPageCompound(p);
242-
set_page_private(p, (unsigned long)page);
244+
p->first_page = page;
243245
}
244246
}
245247

@@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
248250
int i;
249251
int nr_pages = 1 << order;
250252

251-
if (unlikely((unsigned long)page[1].lru.prev != order))
253+
if (unlikely(compound_order(page) != order))
252254
bad_page(page);
253255

254-
for (i = 0; i < nr_pages; i++) {
256+
if (unlikely(!PageCompound(page)))
257+
bad_page(page);
258+
__ClearPageCompound(page);
259+
for (i = 1; i < nr_pages; i++) {
255260
struct page *p = page + i;
256261

257-
if (unlikely(!PageCompound(p) |
258-
(page_private(p) != (unsigned long)page)))
262+
if (unlikely(!PageCompound(p) | !PageTail(p) |
263+
(p->first_page != page)))
259264
bad_page(page);
265+
__ClearPageTail(p);
260266
__ClearPageCompound(p);
261267
}
262268
}
@@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page)
429435
1 << PG_private |
430436
1 << PG_locked |
431437
1 << PG_active |
432-
1 << PG_reclaim |
433438
1 << PG_slab |
434439
1 << PG_swapcache |
435440
1 << PG_writeback |
436441
1 << PG_reserved |
437442
1 << PG_buddy ))))
438443
bad_page(page);
444+
/*
445+
* PageReclaim == PageTail. It is only an error
446+
* for PageReclaim to be set if PageCompound is clear.
447+
*/
448+
if (unlikely(!PageCompound(page) && PageReclaim(page)))
449+
bad_page(page);
439450
if (PageDirty(page))
440451
__ClearPageDirty(page);
441452
/*

mm/slab.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
602602

603603
static inline struct kmem_cache *page_get_cache(struct page *page)
604604
{
605-
if (unlikely(PageCompound(page)))
606-
page = (struct page *)page_private(page);
605+
page = compound_head(page);
607606
BUG_ON(!PageSlab(page));
608607
return (struct kmem_cache *)page->lru.next;
609608
}
@@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
615614

616615
static inline struct slab *page_get_slab(struct page *page)
617616
{
618-
if (unlikely(PageCompound(page)))
619-
page = (struct page *)page_private(page);
617+
page = compound_head(page);
620618
BUG_ON(!PageSlab(page));
621619
return (struct slab *)page->lru.prev;
622620
}

mm/slub.c

Lines changed: 4 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
13251325

13261326
page = virt_to_page(x);
13271327

1328-
if (unlikely(PageCompound(page)))
1329-
page = page->first_page;
1330-
1328+
page = compound_head(page);
13311329

13321330
if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
13331331
set_tracking(s, x, TRACK_FREE);
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
13381336
/* Figure out on which slab object the object resides */
13391337
static struct page *get_object_page(const void *x)
13401338
{
1341-
struct page *page = virt_to_page(x);
1342-
1343-
if (unlikely(PageCompound(page)))
1344-
page = page->first_page;
1339+
struct page *page = compound_head(virt_to_page(x));
13451340

13461341
if (!PageSlab(page))
13471342
return NULL;
@@ -2081,10 +2076,7 @@ void kfree(const void *x)
20812076
if (!x)
20822077
return;
20832078

2084-
page = virt_to_page(x);
2085-
2086-
if (unlikely(PageCompound(page)))
2087-
page = page->first_page;
2079+
page = compound_head(virt_to_page(x));
20882080

20892081
s = page->slab;
20902082

@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
21202112
return NULL;
21212113
}
21222114

2123-
page = virt_to_page(p);
2124-
2125-
if (unlikely(PageCompound(page)))
2126-
page = page->first_page;
2115+
page = compound_head(virt_to_page(p));
21272116

21282117
new_cache = get_slab(new_size, flags);
21292118

mm/swap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page)
5555

5656
static void put_compound_page(struct page *page)
5757
{
58-
page = (struct page *)page_private(page);
58+
page = compound_head(page);
5959
if (put_page_testzero(page)) {
6060
compound_page_dtor *dtor;
6161

0 commit comments

Comments
 (0)