
Commit ac0e5b7

gormanm authored and Linus Torvalds committed
remove PAGE_GROUP_BY_MOBILITY
Grouping pages by mobility can be disabled at compile-time. This was
considered undesirable by a number of people. However, in the current
stack of patches, it is not a simple case of just dropping the
configurable patch as it would cause merge conflicts. This patch backs
out the configuration option.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 56fd56b commit ac0e5b7
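
For context, "grouping pages by mobility" means the buddy allocator keeps one free list per migrate type at every allocation order, so pages that can be moved or reclaimed cluster together instead of pinning whole blocks. Below is a minimal userspace sketch of that arrangement, not kernel code: the MIGRATE_* values mirror the mmzone.h diff below, while MAX_ORDER = 11 and the plain counters standing in for struct list_head free lists are assumptions for illustration.

#include <stdio.h>

#define MAX_ORDER           11  /* assumed; typical for this kernel era */
#define MIGRATE_UNMOVABLE    0
#define MIGRATE_RECLAIMABLE  1
#define MIGRATE_MOVABLE      2
#define MIGRATE_HIGHATOMIC   3
#define MIGRATE_RESERVE      4
#define MIGRATE_TYPES        5

/* One bucket per (order, migratetype) pair; the kernel keeps a
 * struct list_head here, this model just counts free blocks. */
static int free_area[MAX_ORDER][MIGRATE_TYPES];

int main(void)
{
        /* A movable page freed at order 3 lands on the movable list
         * only, keeping it away from unmovable allocations. */
        free_area[3][MIGRATE_MOVABLE]++;
        printf("order-3 movable blocks: %d\n", free_area[3][MIGRATE_MOVABLE]);
        return 0;
}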

File tree: 3 files changed, +2 -62 lines

include/linux/mmzone.h
init/Kconfig
mm/page_alloc.c


include/linux/mmzone.h

Lines changed: 0 additions & 9 deletions
@@ -33,21 +33,12 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3

-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
 #define MIGRATE_HIGHATOMIC    3
 #define MIGRATE_RESERVE       4
 #define MIGRATE_TYPES         5
-#else
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_UNRECLAIMABLE 0
-#define MIGRATE_MOVABLE       0
-#define MIGRATE_HIGHATOMIC    0
-#define MIGRATE_RESERVE       0
-#define MIGRATE_TYPES         1
-#endif

 #define for_each_migratetype_order(order, type) \
         for (order = 0; order < MAX_ORDER; order++) \
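
The diff context above truncates for_each_migratetype_order after its second line. A hedged sketch of the complete macro and its effect, with the inner loop over MIGRATE_TYPES assumed from the macro's name and the constants it pairs with:

#include <stdio.h>

#define MAX_ORDER     11        /* assumed value */
#define MIGRATE_TYPES  5

/* Inner loop is an assumption; the diff only shows the outer one. */
#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
        int order, type, lists = 0;

        /* Visits every (order, type) pair: 11 * 5 = 55 free lists. */
        for_each_migratetype_order(order, type)
                lists++;
        printf("free lists walked: %d\n", lists);
        return 0;
}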

init/Kconfig

Lines changed: 0 additions & 13 deletions
@@ -607,19 +607,6 @@ config BASE_SMALL
         default 0 if BASE_FULL
         default 1 if !BASE_FULL

-config PAGE_GROUP_BY_MOBILITY
-        bool "Group pages based on their mobility in the page allocator"
-        def_bool y
-        help
-          The standard allocator will fragment memory over time which means
-          that high order allocations will fail even if kswapd is running. If
-          this option is set, the allocator will try and group page types
-          based on their ability to migrate or reclaim. This is a best effort
-          attempt at lowering fragmentation which a few workloads care about.
-          The loss is a more complex allocator that may perform slower. If
-          you are interested in working with large pages, say Y and set
-          /proc/sys/vm/min_free_bytes to 16374. Otherwise say N
-
 menuconfig MODULES
         bool "Enable loadable module support"
         help

mm/page_alloc.c

Lines changed: 2 additions & 40 deletions
@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif

-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;

 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
                 ((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }

-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-        return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-        return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
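
The surviving context line ((gfp_flags & __GFP_RECLAIMABLE) != 0); is the tail of allocflags_to_migratetype(), and page_group_by_mobility_disabled is the runtime knob that survives this commit in place of the compile-time switch. A userspace model of that mapping; the __GFP_* bit values here are illustrative, not the real kernel ones:

#include <stdio.h>

#define __GFP_RECLAIMABLE 0x1u  /* illustrative bit positions */
#define __GFP_MOVABLE     0x2u

#define MIGRATE_UNMOVABLE   0
#define MIGRATE_RECLAIMABLE 1
#define MIGRATE_MOVABLE     2

static int page_group_by_mobility_disabled;     /* runtime knob kept by this commit */

static int allocflags_to_migratetype(unsigned int gfp_flags)
{
        /* Mirrors what the removed #else stub hard-coded. */
        if (page_group_by_mobility_disabled)
                return MIGRATE_UNMOVABLE;

        /* Movable contributes bit 1, reclaimable bit 0, matching the
         * context line visible in the hunk above. */
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
        printf("%d %d %d\n",
               allocflags_to_migratetype(0),                    /* 0: unmovable */
               allocflags_to_migratetype(__GFP_RECLAIMABLE),    /* 1 */
               allocflags_to_migratetype(__GFP_MOVABLE));       /* 2 */
        return 0;
}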
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 }


-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
          * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
          * anyway as we check zone boundaries in move_freepages_block().
          * Remove at a later date when no bug reports exist related to
-         * CONFIG_PAGE_GROUP_BY_MOBILITY
+         * grouping pages by mobility
          */
         BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
         /* Use MIGRATE_RESERVE rather than fail an allocation */
         return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-                                                int start_migratetype)
-{
-        return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */

 /*
  * Do the hard work of removing an element from the buddy allocator.
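
The comment removed from its #ifdef above describes a table of fallback migrate types consulted when the preferred type's free lists are empty, with __rmqueue_fallback() using MIGRATE_RESERVE rather than failing. A sketch of that idea; the table contents are illustrative, since the real fallbacks[] array is not visible in this diff:

#include <stdio.h>

#define MIGRATE_UNMOVABLE   0
#define MIGRATE_RECLAIMABLE 1
#define MIGRATE_MOVABLE     2
#define MIGRATE_RESERVE     4
#define MIGRATE_TYPES       5

/* Illustrative fallback order per starting type, ending in RESERVE. */
static const int fallbacks[MIGRATE_TYPES][3] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
};

static int nr_free[MIGRATE_TYPES];      /* stand-in for per-type free lists */

static int rmqueue_fallback(int start_migratetype)
{
        int i;

        for (i = 0; i < 3; i++) {
                int type = fallbacks[start_migratetype][i];

                if (nr_free[type] > 0) {
                        nr_free[type]--;
                        return type;    /* stole a block from this list */
                }
        }
        return -1;
}

int main(void)
{
        nr_free[MIGRATE_MOVABLE] = 1;
        printf("fell back to type %d\n", rmqueue_fallback(MIGRATE_UNMOVABLE));
        return 0;
}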
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */

-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)

         smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */

 /*
  * Free a 0-order page
@@ -1157,7 +1128,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
                         goto failed;
                 }

-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
                 /* Find a page of the appropriate migrate type */
                 list_for_each_entry(page, &pcp->list, lru)
                         if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
                                         pcp->batch, &pcp->list, migratetype);
                         page = list_entry(pcp->list.next, struct page, lru);
                 }
-#else
-                page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */

                 list_del(&page->lru);
                 pcp->count--;
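
With the #ifdef gone, buffered_rmqueue() always scans the per-cpu (pcp) list for a page whose private field records the wanted migrate type, where the removed #else branch simply took the list head. A userspace model of that search, with an array standing in for the kernel's linked list:

#include <stdio.h>

#define MIGRATE_MOVABLE 2

struct page_model {
        int private;    /* caches the page's migrate type, as in the hunk above */
};

static struct page_model *find_migratetype(struct page_model *pcp, int count,
                                           int migratetype)
{
        int i;

        for (i = 0; i < count; i++)
                if (pcp[i].private == migratetype)
                        return &pcp[i];
        return NULL;    /* the kernel would refill the pcp list and retry */
}

int main(void)
{
        struct page_model pcp[] = { { 0 }, { 2 }, { 1 } };
        struct page_model *page = find_migratetype(pcp, 3, MIGRATE_MOVABLE);

        printf("found page with private=%d\n", page ? page->private : -1);
        return 0;
}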
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)

 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
                 }
         }
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
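
setup_zone_migrate_reserve(), now compiled unconditionally, marks MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE, with the count based on zone->pages_min per the comment above. An arithmetic sketch of that sizing; the round-up formula and MAX_ORDER value are assumptions drawn from the comment, not the exact kernel computation:

#include <stdio.h>

#define MAX_ORDER          11   /* assumed value */
#define MAX_ORDER_NR_PAGES (1UL << (MAX_ORDER - 1))     /* 1024 pages */

int main(void)
{
        unsigned long pages_min = 1379; /* example zone->pages_min */

        /* Enough MAX_ORDER blocks to cover pages_min, rounded up. */
        unsigned long reserve =
                (pages_min + MAX_ORDER_NR_PAGES - 1) / MAX_ORDER_NR_PAGES;

        printf("MIGRATE_RESERVE blocks: %lu\n", reserve);       /* 2 */
        return 0;
}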
