
Commit 64c5e13

gormanm authored and Linus Torvalds committed
don't group high order atomic allocations
Grouping high-order atomic allocations together was intended to allow bursty users of atomic allocations, such as e1000, to keep working in situations where their preallocated buffers were depleted. This did not work in at least one case with a wireless network adapter needing order-1 allocations frequently.

To resolve that, the free pages used for min_free_kbytes were moved to separate contiguous blocks with the patch bias-the-location-of-pages-freed-for-min_free_kbytes-in-the-same-max_order_nr_pages-blocks.

It is felt that keeping the free pages in the same contiguous blocks should be sufficient for bursty, short-lived high-order atomic allocations to succeed, maybe even with the e1000. Even if there is a failure, increasing the value of min_free_kbytes will free pages as contiguous blocks, in contrast to the standard buddy allocator, which makes no attempt to keep the minimum number of free pages contiguous.

This patch backs out grouping high-order atomic allocations together, to determine whether it is really needed. If a new report comes in about high-order atomic allocations failing, the feature can be reintroduced to see whether it fixes the problem. As a side effect, this patch reduces by one the number of bits required to track the mobility type of pages within a MAX_ORDER_NR_PAGES block.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent ac0e5b7 commit 64c5e13
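For context, here is a minimal, self-contained sketch (plain userspace C, not the kernel code; the flag values and the main() driver are illustrative assumptions) of the mobility-based clustering that is all that remains once the high-order atomic case is dropped: only __GFP_MOVABLE and __GFP_RECLAIMABLE decide which free list an allocation is grouped on, exactly the bit expression kept in allocflags_to_migratetype().

#include <stdio.h>

/* Illustrative flag bits and migrate types; the real kernel values differ. */
#define __GFP_RECLAIMABLE  0x1u
#define __GFP_MOVABLE      0x2u

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE, MIGRATE_TYPES };

/* Same shape as the post-patch allocflags_to_migratetype(): the order
 * argument is gone, so only the mobility flags pick the free list. */
static int allocflags_to_migratetype(unsigned int gfp_flags)
{
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
	       ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("none        -> %d (UNMOVABLE)\n",   allocflags_to_migratetype(0));
	printf("RECLAIMABLE -> %d (RECLAIMABLE)\n", allocflags_to_migratetype(__GFP_RECLAIMABLE));
	printf("MOVABLE     -> %d (MOVABLE)\n",     allocflags_to_migratetype(__GFP_MOVABLE));
	return 0;
}

Setting both flags would evaluate to 3, which after this patch is MIGRATE_RESERVE; that combination is presumably what the WARN_ON on GFP_MOVABLE_MASK in the real function guards against.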

File tree

3 files changed: +10, -34 lines

include/linux/mmzone.h

Lines changed: 2 additions & 3 deletions
@@ -36,9 +36,8 @@
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
-#define MIGRATE_HIGHATOMIC    3
-#define MIGRATE_RESERVE       4
-#define MIGRATE_TYPES         5
+#define MIGRATE_RESERVE       3
+#define MIGRATE_TYPES         4
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \

include/linux/pageblock-flags.h

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-	PB_range(PB_migrate, 3), /* 3 bits required for migrate types */
+	PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
 	NR_PAGEBLOCK_BITS
 };
 
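The 3-bit to 2-bit change here is just the arithmetic consequence of the mmzone.h change above: five migrate types (values 0-4) need ceil(log2(5)) = 3 bits per pageblock, while the remaining four types (values 0-3) fit in 2. A throwaway check, assuming nothing beyond the migrate-type counts themselves:

#include <stdio.h>

/* Smallest number of bits that can hold the values 0 .. n-1. */
static int bits_for(int n)
{
	int bits = 0;

	while ((1 << bits) < n)
		bits++;
	return bits;
}

int main(void)
{
	printf("5 migrate types -> %d pageblock bits\n", bits_for(5)); /* prints 3 */
	printf("4 migrate types -> %d pageblock bits\n", bits_for(4)); /* prints 2 */
	return 0;
}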

mm/page_alloc.c

Lines changed: 7 additions & 30 deletions
@@ -174,18 +174,13 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 					PB_migrate, PB_migrate_end);
 }
 
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
 
-	/* Cluster high-order atomic allocations together */
-	if (unlikely(order > 0) &&
-			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
-		return MIGRATE_HIGHATOMIC;
-
 	/* Cluster based on mobility */
 	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
@@ -706,11 +701,10 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,    MIGRATE_RESERVE },
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE,    MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -804,9 +798,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	int current_order;
 	struct page *page;
 	int migratetype, i;
-	int nonatomic_fallback_atomic = 0;
 
-retry:
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
@@ -816,14 +808,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		/* MIGRATE_RESERVE handled later if necessary */
 		if (migratetype == MIGRATE_RESERVE)
 			continue;
-		/*
-		 * Make it hard to fallback to blocks used for
-		 * high-order atomic allocations
-		 */
-		if (migratetype == MIGRATE_HIGHATOMIC &&
-			start_migratetype != MIGRATE_UNMOVABLE &&
-			!nonatomic_fallback_atomic)
-			continue;
 
 		area = &(zone->free_area[current_order]);
 		if (list_empty(&area->free_list[migratetype]))
@@ -849,8 +833,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 					start_migratetype);
 
 			/* Claim the whole block if over half of it is free */
-			if ((pages << current_order) >= (1 << (MAX_ORDER-2)) &&
-					migratetype != MIGRATE_HIGHATOMIC)
+			if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
 				set_pageblock_migratetype(page,
 							start_migratetype);
 
@@ -872,12 +855,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		}
 	}
 
-	/* Allow fallback to high-order atomic blocks if memory is that low */
-	if (!nonatomic_fallback_atomic) {
-		nonatomic_fallback_atomic = 1;
-		goto retry;
-	}
-
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
@@ -1112,7 +1089,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
-	int migratetype = allocflags_to_migratetype(gfp_flags, order);
+	int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
 	cpu = get_cpu();
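To see why MIGRATE_HIGHATOMIC could simply be deleted from the fallbacks table rather than worked around, here is a hedged sketch of the post-patch fallback walk in __rmqueue_fallback(): try the other migrate types in the row's preference order, skip MIGRATE_RESERVE during the walk, and fall back to the reserve only when everything else is empty. This is a simplified model compiled as ordinary C; free_list_has_pages() and pick_fallback_type() are invented stand-ins, not kernel functions, and the real code additionally iterates from the largest order downward.

#include <stdbool.h>
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE, MIGRATE_TYPES };

/* Post-patch fallback table: each row lists the other types to raid,
 * in preference order, once the requested type's free lists are empty. */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};

/* Hypothetical stand-in for "does this type still have a free block?". */
static bool free_list_has_pages(int migratetype)
{
	return migratetype == MIGRATE_MOVABLE; /* pretend only movable blocks remain */
}

/* Simplified shape of the post-patch fallback walk: no retry label,
 * no special-casing of high-order atomic blocks. */
static int pick_fallback_type(int start_migratetype)
{
	for (int i = 0; i < MIGRATE_TYPES - 1; i++) {
		int migratetype = fallbacks[start_migratetype][i];

		if (migratetype == MIGRATE_RESERVE)
			continue; /* handled last, as in the real function */
		if (free_list_has_pages(migratetype))
			return migratetype;
	}
	return MIGRATE_RESERVE; /* use the reserve rather than fail */
}

int main(void)
{
	printf("fallback for an UNMOVABLE request: %d\n",
	       pick_fallback_type(MIGRATE_UNMOVABLE)); /* prints 2 == MIGRATE_MOVABLE */
	return 0;
}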
