@@ -67,6 +67,10 @@ unsigned long totalreserve_pages __read_mostly;
6767long nr_swap_pages ;
6868int percpu_pagelist_fraction ;
6969
70+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
71+ int pageblock_order __read_mostly ;
72+ #endif
73+
7074static void __free_pages_ok (struct page * page , unsigned int order );
7175
7276/*
@@ -709,7 +713,7 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
709713
710714/*
711715 * Move the free pages in a range to the free lists of the requested type.
712- * Note that start_page and end_pages are not aligned in a MAX_ORDER_NR_PAGES
716+ * Note that start_page and end_page are not aligned on a pageblock
713717 * boundary. If alignment is required, use move_freepages_block()
714718 */
715719int move_freepages (struct zone * zone ,
@@ -759,10 +763,10 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
759763 struct page * start_page , * end_page ;
760764
761765 start_pfn = page_to_pfn (page );
762- start_pfn = start_pfn & ~(MAX_ORDER_NR_PAGES - 1 );
766+ start_pfn = start_pfn & ~(pageblock_nr_pages - 1 );
763767 start_page = pfn_to_page (start_pfn );
764- end_page = start_page + MAX_ORDER_NR_PAGES - 1 ;
765- end_pfn = start_pfn + MAX_ORDER_NR_PAGES - 1 ;
768+ end_page = start_page + pageblock_nr_pages - 1 ;
769+ end_pfn = start_pfn + pageblock_nr_pages - 1 ;
766770
767771 /* Do not cross zone boundaries */
768772 if (start_pfn < zone -> zone_start_pfn )
@@ -826,14 +830,14 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
826830 * back for a reclaimable kernel allocation, be more
827831 * agressive about taking ownership of free pages
828832 */
829- if (unlikely (current_order >= MAX_ORDER / 2 ) ||
833+ if (unlikely (current_order >= ( pageblock_order >> 1 ) ) ||
830834 start_migratetype == MIGRATE_RECLAIMABLE ) {
831835 unsigned long pages ;
832836 pages = move_freepages_block (zone , page ,
833837 start_migratetype );
834838
835839 /* Claim the whole block if over half of it is free */
836- if (pages >= (1 << (MAX_ORDER - 2 )))
840+ if (pages >= (1 << (pageblock_order - 1 )))
837841 set_pageblock_migratetype (page ,
838842 start_migratetype );
839843
@@ -846,7 +850,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
846850 __mod_zone_page_state (zone , NR_FREE_PAGES ,
847851 - (1UL << order ));
848852
849- if (current_order == MAX_ORDER - 1 )
853+ if (current_order == pageblock_order )
850854 set_pageblock_migratetype (page ,
851855 start_migratetype );
852856
@@ -2385,7 +2389,7 @@ void build_all_zonelists(void)
23852389 * made on memory-hotadd so a system can start with mobility
23862390 * disabled and enable it later
23872391 */
2388- if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES ))
2392+ if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES ))
23892393 page_group_by_mobility_disabled = 1 ;
23902394 else
23912395 page_group_by_mobility_disabled = 0 ;
@@ -2470,7 +2474,7 @@ static inline unsigned long wait_table_bits(unsigned long size)
24702474#define LONG_ALIGN (x ) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
24712475
24722476/*
2473- * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
2477+ * Mark a number of pageblocks as MIGRATE_RESERVE. The number
24742478 * of blocks reserved is based on zone->pages_min. The memory within the
24752479 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
24762480 * higher will lead to a bigger reserve which will get freed as contiguous
@@ -2485,9 +2489,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
24852489 /* Get the start pfn, end pfn and the number of blocks to reserve */
24862490 start_pfn = zone -> zone_start_pfn ;
24872491 end_pfn = start_pfn + zone -> spanned_pages ;
2488- reserve = roundup (zone -> pages_min , MAX_ORDER_NR_PAGES ) >> (MAX_ORDER - 1 );
2492+ reserve = roundup (zone -> pages_min , pageblock_nr_pages ) >>
2493+ pageblock_order ;
24892494
2490- for (pfn = start_pfn ; pfn < end_pfn ; pfn += MAX_ORDER_NR_PAGES ) {
2495+ for (pfn = start_pfn ; pfn < end_pfn ; pfn += pageblock_nr_pages ) {
24912496 if (!pfn_valid (pfn ))
24922497 continue ;
24932498 page = pfn_to_page (pfn );
@@ -2562,7 +2567,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
25622567 * the start are marked MIGRATE_RESERVE by
25632568 * setup_zone_migrate_reserve()
25642569 */
2565- if ((pfn & (MAX_ORDER_NR_PAGES - 1 )))
2570+ if ((pfn & (pageblock_nr_pages - 1 )))
25662571 set_pageblock_migratetype (page , MIGRATE_MOVABLE );
25672572
25682573 INIT_LIST_HEAD (& page -> lru );
@@ -3266,17 +3271,17 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
32663271#ifndef CONFIG_SPARSEMEM
32673272/*
32683273 * Calculate the size of the zone->blockflags rounded to an unsigned long
3269- * Start by making sure zonesize is a multiple of MAX_ORDER-1 by rounding up
3270- * Then figure 1 NR_PAGEBLOCK_BITS worth of bits per MAX_ORDER-1 , finally
3274+ * Start by making sure zonesize is a multiple of pageblock_order by rounding
3275+ * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock , finally
32713276 * round what is now in bits to nearest long in bits, then return it in
32723277 * bytes.
32733278 */
32743279static unsigned long __init usemap_size (unsigned long zonesize )
32753280{
32763281 unsigned long usemapsize ;
32773282
3278- usemapsize = roundup (zonesize , MAX_ORDER_NR_PAGES );
3279- usemapsize = usemapsize >> ( MAX_ORDER - 1 ) ;
3283+ usemapsize = roundup (zonesize , pageblock_nr_pages );
3284+ usemapsize = usemapsize >> pageblock_order ;
32803285 usemapsize *= NR_PAGEBLOCK_BITS ;
32813286 usemapsize = roundup (usemapsize , 8 * sizeof (unsigned long ));
32823287
@@ -3298,6 +3303,27 @@ static void inline setup_usemap(struct pglist_data *pgdat,
32983303 struct zone * zone , unsigned long zonesize ) {}
32993304#endif /* CONFIG_SPARSEMEM */
33003305
3306+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3307+ /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3308+ static inline void __init set_pageblock_order (unsigned int order )
3309+ {
3311+ /* Check that pageblock_order has not already been set up */
3311+ if (pageblock_order )
3312+ return ;
3313+
3314+ /*
3315+ * Assume the largest contiguous order of interest is a huge page.
3316+ * This value may be variable depending on boot parameters on IA64
3317+ */
3318+ pageblock_order = order ;
3319+ }
3320+ #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3321+
3322+ /* Defined this way to avoid accidentally referencing HUGETLB_PAGE_ORDER */
3323+ #define set_pageblock_order (x ) do {} while (0)
3324+
3325+ #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3326+
33013327/*
33023328 * Set up the zone data structures:
33033329 * - mark all pages reserved
@@ -3378,6 +3404,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
33783404 if (!size )
33793405 continue ;
33803406
3407+ set_pageblock_order (HUGETLB_PAGE_ORDER );
33813408 setup_usemap (pgdat , zone , size );
33823409 ret = init_currently_empty_zone (zone , zone_start_pfn ,
33833410 size , MEMMAP_EARLY );
@@ -4375,15 +4402,15 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
43754402{
43764403#ifdef CONFIG_SPARSEMEM
43774404 pfn &= (PAGES_PER_SECTION - 1 );
4378- return (pfn >> ( MAX_ORDER - 1 ) ) * NR_PAGEBLOCK_BITS ;
4405+ return (pfn >> pageblock_order ) * NR_PAGEBLOCK_BITS ;
43794406#else
43804407 pfn = pfn - zone -> zone_start_pfn ;
4381- return (pfn >> ( MAX_ORDER - 1 ) ) * NR_PAGEBLOCK_BITS ;
4408+ return (pfn >> pageblock_order ) * NR_PAGEBLOCK_BITS ;
43824409#endif /* CONFIG_SPARSEMEM */
43834410}
43844411
43854412/**
4386- * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages
4413+ * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
43874414 * @page: The page within the block of interest
43884415 * @start_bitidx: The first bit of interest to retrieve
43894416 * @end_bitidx: The last bit of interest
@@ -4411,7 +4438,7 @@ unsigned long get_pageblock_flags_group(struct page *page,
44114438}
44124439
44134440/**
4414- * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages
4441+ * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
44154442 * @page: The page within the block of interest
44164443 * @start_bitidx: The first bit of interest
44174444 * @end_bitidx: The last bit of interest
0 commit comments