
Commit c5bb27e

Alexei Starovoitov authored and akpm00 committed
mm/page_alloc: avoid second trylock of zone->lock
spin_trylock followed by spin_lock will cause extra write cache access.
If the lock is contended, it may cause unnecessary cache line bouncing
and will execute a redundant irq restore/save pair. Therefore, check
alloc/fpi_flags first and use either spin_trylock or spin_lock.

Link: https://lkml.kernel.org/r/20250331002809.94758-1-alexei.starovoitov@gmail.com
Fixes: 97769a5 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Martin KaFai Lau <martin.lau@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
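The fix is easiest to see as two control-flow shapes. Below is a minimal
userspace sketch of both, built with -pthread; acquire_old(), acquire_new()
and the TRYLOCK_ONLY flag are hypothetical names, and a pthread spinlock
stands in for zone->lock. The sketch cannot reproduce the redundant irq
restore/save pair, only the extra access to the contended lock's cache line.

/*
 * Illustrative sketch only, not kernel code. A pthread spinlock stands
 * in for zone->lock; TRYLOCK_ONLY stands in for ALLOC_TRYLOCK/FPI_TRYLOCK.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TRYLOCK_ONLY	0x1	/* hypothetical stand-in for the kernel flags */

static pthread_spinlock_t zone_lock;

/* Old shape: trylock first, then possibly lock the same line again. */
static bool acquire_old(unsigned int flags)
{
	if (pthread_spin_trylock(&zone_lock) != 0) {
		if (flags & TRYLOCK_ONLY)
			return false;		/* opportunistic caller bails out */
		/* Second write access to an already contended cache line. */
		pthread_spin_lock(&zone_lock);
	}
	return true;
}

/* New shape: branch on the flag first; the lock is touched exactly once. */
static bool acquire_new(unsigned int flags)
{
	if (flags & TRYLOCK_ONLY) {
		if (pthread_spin_trylock(&zone_lock) != 0)
			return false;		/* opportunistic caller bails out */
	} else {
		pthread_spin_lock(&zone_lock);
	}
	return true;
}

int main(void)
{
	pthread_spin_init(&zone_lock, PTHREAD_PROCESS_PRIVATE);
	if (acquire_new(TRYLOCK_ONLY)) {
		puts("uncontended trylock succeeded");
		pthread_spin_unlock(&zone_lock);
	}
	pthread_spin_destroy(&zone_lock);
	return 0;
}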
1 parent a84edd5 · commit c5bb27e

1 file changed: 9 additions, 6 deletions


mm/page_alloc.c

@@ -1400,11 +1400,12 @@ static void free_one_page(struct zone *zone, struct page *page,
 	struct llist_head *llhead;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags)) {
 			add_page_to_zone_llist(zone, page, order);
 			return;
 		}
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}

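In the hunk above, the FPI_TRYLOCK failure path does not spin: the page is
pushed onto a lock-free per-zone llist (note the struct llist_head *llhead
in the context) and freed later under the lock. A minimal userspace sketch
of that kind of push follows, using C11 atomics rather than the kernel's
llist primitives; struct deferred, zone_llist and defer_free() are
hypothetical names, not the implementation of add_page_to_zone_llist().

/*
 * Sketch of a lock-free single-linked push, assuming C11 atomics.
 */
#include <stdatomic.h>
#include <stddef.h>

struct deferred {
	struct deferred *next;
};

static _Atomic(struct deferred *) zone_llist;	/* zero-initialized: empty list */

static void defer_free(struct deferred *node)
{
	struct deferred *head = atomic_load(&zone_llist);

	/* Classic CAS loop: on failure, head is reloaded automatically. */
	do {
		node->next = head;
	} while (!atomic_compare_exchange_weak(&zone_llist, &head, node));
}

int main(void)
{
	struct deferred d = { .next = NULL };

	defer_free(&d);	/* a later lock holder drains and frees the list */
	return 0;
}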
@@ -2314,9 +2315,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	unsigned long flags;
 	int i;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags))
 			return 0;
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 	for (i = 0; i < count; ++i) {
@@ -2937,9 +2939,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
 	do {
 		page = NULL;
-		if (!spin_trylock_irqsave(&zone->lock, flags)) {
-			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+			if (!spin_trylock_irqsave(&zone->lock, flags))
 				return NULL;
+		} else {
 			spin_lock_irqsave(&zone->lock, flags);
 		}
 		if (alloc_flags & ALLOC_HIGHATOMIC)
