Skip to content

Commit

Permalink
mm, page_alloc: add static key for should_fail_alloc_page()
Browse files Browse the repository at this point in the history
Similarly to should_failslab(), remove the overhead of calling the
noinline function should_fail_alloc_page() with a static key that guards
the callsite in the page allocator hotpath, and is controlled by the
fault and error injection frameworks and bpf.

Additionally, compile out all relevant code if neither
CONFIG_FAIL_PAGE_ALLOC nor CONFIG_FUNCTION_ERROR_INJECTION is enabled.
When only the latter is not enabled, make should_fail_alloc_page()
static inline instead of noinline.

No measurement was done other than verifying that should_fail_alloc_page
is gone from the perf profile. A measurement with the analogous change
for should_failslab() suggests that for a page allocator intensive
workload there might be a noticeable improvement. It also makes
CONFIG_FAIL_PAGE_ALLOC an option suitable not only for debug kernels.

Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
  • Loading branch information
tehcaster authored and Kernel Patches Daemon committed Jun 21, 2024
1 parent e70f00d commit edcc7ba
Show file tree
Hide file tree
Showing 4 changed files with 33 additions and 5 deletions.
3 changes: 2 additions & 1 deletion include/linux/fault-inject.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,8 +96,9 @@ static inline void fault_config_init(struct fault_config *config,

struct kmem_cache;

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);

#endif
#ifdef CONFIG_FAIL_PAGE_ALLOC
bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
Expand Down
3 changes: 2 additions & 1 deletion mm/fail_page_alloc.c
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
#include <linux/mm.h>
#include "internal.h"

static struct {
struct fault_attr attr;
Expand All @@ -9,7 +10,7 @@ static struct {
bool ignore_gfp_reclaim;
u32 min_order;
} fail_page_alloc = {
.attr = FAULT_ATTR_INITIALIZER,
.attr = FAULT_ATTR_INITIALIZER_KEY(&should_fail_alloc_page_active.key),
.ignore_gfp_reclaim = true,
.ignore_gfp_highmem = true,
.min_order = 1,
Expand Down
2 changes: 2 additions & 0 deletions mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -410,6 +410,8 @@ extern char * const zone_names[MAX_NR_ZONES];
/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

DECLARE_STATIC_KEY_FALSE(should_fail_alloc_page_active);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
Expand Down
30 changes: 27 additions & 3 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -3008,11 +3008,35 @@ struct page *rmqueue(struct zone *preferred_zone,
return page;
}

noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#if defined(CONFIG_FUNCTION_ERROR_INJECTION) || defined(CONFIG_FAIL_PAGE_ALLOC)
/*
 * False by default, so the hotpath check compiles to a patched-out branch.
 * The key is enabled by the fault-injection attr (see
 * FAULT_ATTR_INITIALIZER_KEY in mm/fail_page_alloc.c) and by the error
 * injection framework via ALLOW_ERROR_INJECTION_KEY() below.
 */
DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active);

/*
 * noinline keeps the function as a distinct symbol that the error
 * injection framework can hook when CONFIG_FUNCTION_ERROR_INJECTION is
 * enabled; otherwise it may be inlined into its only caller.
 */
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
noinline
#else
static inline
#endif
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
ALLOW_ERROR_INJECTION_KEY(should_fail_alloc_page, TRUE, &should_fail_alloc_page_active);

/*
 * Hotpath wrapper: the call to should_fail_alloc_page() is guarded by a
 * default-false static branch, so the page allocator pays no function
 * call overhead unless fault/error injection has enabled the key.
 */
static __always_inline bool
should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
{
if (static_branch_unlikely(&should_fail_alloc_page_active))
return should_fail_alloc_page(gfp_mask, order);

return false;
}
#else
/*
 * Neither CONFIG_FUNCTION_ERROR_INJECTION nor CONFIG_FAIL_PAGE_ALLOC is
 * enabled: allocation failure injection is compiled out entirely.
 */
static __always_inline bool
should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
{
return false;
}
#endif

static inline long __zone_watermark_unusable_free(struct zone *z,
unsigned int order, unsigned int alloc_flags)
Expand Down Expand Up @@ -4430,7 +4454,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,

might_alloc(gfp_mask);

if (should_fail_alloc_page(gfp_mask, order))
if (should_fail_alloc_page_wrapped(gfp_mask, order))
return false;

*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
Expand Down

0 comments on commit edcc7ba

Please sign in to comment.