@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
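
With the stubs deleted above, allocflags_to_migratetype() is always the real bit-packing version, whose tail is visible in the context lines. A standalone userspace sketch of that mapping follows; the flag values are invented placeholders, and the `<< 1` for the movable bit is reconstructed, since only the reclaimable half of the expression appears in the hunk.

/* Userspace sketch of the bit-packing in allocflags_to_migratetype().
 * The flag values are illustrative placeholders, not the kernel's. */
#include <stdio.h>

#define GFP_RECLAIMABLE (1u << 0)	/* placeholder for __GFP_RECLAIMABLE */
#define GFP_MOVABLE     (1u << 1)	/* placeholder for __GFP_MOVABLE */

enum migratetype {
	MIGRATE_UNMOVABLE,	/* 0b00: neither mobility flag set */
	MIGRATE_RECLAIMABLE,	/* 0b01: reclaimable (e.g. inode caches) */
	MIGRATE_MOVABLE,	/* 0b10: movable (e.g. user pages) */
};

static int allocflags_to_migratetype(unsigned int gfp_flags)
{
	/* bit 1 <- movable, bit 0 <- reclaimable */
	return (((gfp_flags & GFP_MOVABLE) != 0) << 1) |
	       ((gfp_flags & GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d %d %d\n",
	       allocflags_to_migratetype(0),		   /* 0: unmovable */
	       allocflags_to_migratetype(GFP_RECLAIMABLE), /* 1 */
	       allocflags_to_migratetype(GFP_MOVABLE));	   /* 2 */
	return 0;
}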
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 }
 
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
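
Removing the #ifdef makes this fallback machinery unconditional. For readers following along, the array the comment describes has roughly the shape below; this is a reconstruction for illustration, so check mm/page_alloc.c of this era for the authoritative ordering. Read it as: when the free lists of the row's type are empty, try the column types in order.

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_RESERVE, MIGRATE_TYPES };

static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* never used */
};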
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
 	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
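
The tail of __rmqueue_fallback() visible above shows its last resort: take from MIGRATE_RESERVE rather than fail. A toy skeleton of the whole fallback walk follows; the data structures are invented stand-ins, not the kernel's free lists, but the search order (other types first, largest blocks first, the reserve only at the end) follows the logic this hunk keeps.

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_RESERVE, MIGRATE_TYPES };
#define MAX_ORDER 11

/* free_count[type][order]: toy stand-in for the per-type free lists */
static int free_count[MIGRATE_TYPES][MAX_ORDER];

/* same shape as the fallback table sketched earlier */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};

static int rmqueue_fallback(int order, int start_type)
{
	for (int i = 0; i < MIGRATE_TYPES - 1; i++) {
		int type = fallbacks[start_type][i];

		/* the reserve is taken only as the final fallback below */
		if (type == MIGRATE_RESERVE)
			continue;

		/* prefer the biggest block: less fragmentation when the
		 * remainder changes ownership to start_type */
		for (int o = MAX_ORDER - 1; o >= order; o--) {
			if (free_count[type][o] > 0) {
				free_count[type][o]--;
				return type;
			}
		}
	}
	return MIGRATE_RESERVE;	/* use the reserve rather than fail */
}

int main(void)
{
	free_count[MIGRATE_MOVABLE][9] = 1;	/* one large movable block */
	printf("fell back to type %d\n",
	       rmqueue_fallback(3, MIGRATE_UNMOVABLE));	/* 2 = movable */
	return 0;
}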
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
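
drain_all_local_pages() and its helper are now always built, since page grouping needs them as well as hibernation. Per the comment kept above, the job is to spill every CPU's per-cpu page cache back into the buddy allocator; the kernel drains remote CPUs via the smp_call_function() line visible in the hunk, but a serial toy model (all names invented here) shows the accounting:

#include <stdio.h>

#define NR_CPUS 4

/* Toy per-cpu page cache: each CPU holds some cached "pages" (just counts
 * here) that must be spilled back to the global buddy pool before, e.g.,
 * hibernation snapshots the free lists. */
static int pcp_count[NR_CPUS];
static int global_free_pages;

static void drain_local_pages(int cpu)
{
	/* give everything this CPU cached back to the global pool */
	global_free_pages += pcp_count[cpu];
	pcp_count[cpu] = 0;
}

static void drain_all_local_pages(void)
{
	/* the kernel drains the local CPU directly and IPIs the others
	 * via smp_call_function(); done serially here for illustration */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		drain_local_pages(cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pcp_count[cpu] = cpu + 1;
	drain_all_local_pages();
	printf("global free after drain: %d\n", global_free_pages); /* 10 */
	return 0;
}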
@@ -1157,7 +1128,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 			goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 				pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
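
The now-unconditional per-cpu path in buffered_rmqueue() scans pcp->list for a page whose page_private() matches the wanted migrate type, bulk-refills the list when the scan finds nothing, then takes the list head. A toy userspace model of that scan-or-refill pattern follows; the structures are invented, and the real kernel additionally unlinks the chosen page and decrements pcp->count, as the context lines show.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the per-cpu free list: each cached page remembers its
 * migrate type in a "private" field, standing in for page_private(). */
struct page {
	int private;
	struct page *next;
};

static struct page *pcp_list;

static void pcp_refill(int migratetype)
{
	/* the kernel bulk-allocates pcp->batch pages of this type here;
	 * one page is enough for the sketch */
	struct page *p = malloc(sizeof(*p));
	p->private = migratetype;
	p->next = pcp_list;
	pcp_list = p;
}

static struct page *buffered_rmqueue(int migratetype)
{
	for (struct page *p = pcp_list; p; p = p->next)
		if (p->private == migratetype)
			return p;	/* found a page of the right type */
	pcp_refill(migratetype);	/* none cached: refill, take head */
	return pcp_list;
}

int main(void)
{
	pcp_refill(0);				/* cache one unmovable page */
	struct page *p = buffered_rmqueue(2);	/* ask for a movable one */
	printf("got page of type %d\n", p->private);
	return 0;
}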
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
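
The comment above sizes the reserve from zone->pages_min. A hedged arithmetic sketch of that sizing follows, with a made-up pages_min value and the rounding reconstructed from the comment rather than copied from the function body:

#include <stdio.h>

/* Sketch: convert zone->pages_min into a count of MAX_ORDER_NR_PAGES-sized
 * blocks to mark MIGRATE_RESERVE. The round-up mirrors the idea, not
 * necessarily the exact kernel expression. */
#define MAX_ORDER 11
#define MAX_ORDER_NR_PAGES (1UL << (MAX_ORDER - 1))	/* 1024 pages */

int main(void)
{
	unsigned long pages_min = 1445;	/* sample zone->pages_min */
	unsigned long reserve =
		(pages_min + MAX_ORDER_NR_PAGES - 1) / MAX_ORDER_NR_PAGES;
	printf("reserve %lu block(s) of %lu pages\n",
	       reserve, MAX_ORDER_NR_PAGES);	/* 2 blocks of 1024 */
	return 0;
}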
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is