From 30891fa26b1a5e821e3385a06c09cd66d8fcc544 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Thu, 20 Jun 2024 00:49:01 +0200
Subject: [PATCH] mm, page_alloc: add static key for should_fail_alloc_page()

Similarly to should_failslab(), remove the overhead of calling the
noinline function should_fail_alloc_page() with a static key that guards
the callsite in the page allocator hotpath and is controlled by the
fault and error injection frameworks and by bpf.

Additionally, compile out all relevant code if neither
CONFIG_FAIL_PAGE_ALLOC nor CONFIG_FUNCTION_ERROR_INJECTION is enabled.
When only the latter is not enabled, make should_fail_alloc_page()
static inline instead of noinline.

No measurement was done other than verifying that should_fail_alloc_page()
is gone from the perf profile. A measurement with the analogous change for
should_failslab() suggests that for a page-allocator-intensive workload
there might be a noticeable improvement. The change also makes
CONFIG_FAIL_PAGE_ALLOC an option suitable not only for debug kernels.

Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/fault-inject.h |  3 ++-
 mm/fail_page_alloc.c         |  3 ++-
 mm/internal.h                |  2 ++
 mm/page_alloc.c              | 30 +++++++++++++++++++++++++++---
 4 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 0d0fa94dc1c83..1a782042ae802 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -96,8 +96,9 @@ static inline void fault_config_init(struct fault_config *config,
 
 struct kmem_cache;
 
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
-
+#endif
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
 #else
diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c
index b1b09cce93943..0906b76d78e88 100644
--- a/mm/fail_page_alloc.c
+++ b/mm/fail_page_alloc.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fault-inject.h>
 #include <linux/mm.h>
+#include "internal.h"
 
 static struct {
 	struct fault_attr attr;
@@ -9,7 +10,7 @@ static struct {
 	bool ignore_gfp_reclaim;
 	u32 min_order;
 } fail_page_alloc = {
-	.attr = FAULT_ATTR_INITIALIZER,
+	.attr = FAULT_ATTR_INITIALIZER_KEY(&should_fail_alloc_page_active.key),
 	.ignore_gfp_reclaim = true,
 	.ignore_gfp_highmem = true,
 	.min_order = 1,
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e7..8539e39b02e64 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -410,6 +410,8 @@ extern char * const zone_names[MAX_NR_ZONES];
 /* perform sanity checks on struct pages being allocated or freed */
 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+DECLARE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
 extern int min_free_kbytes;
 
 void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e22ce5675ca1..b6e246acb4aa7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3008,11 +3008,35 @@ struct page *rmqueue(struct zone *preferred_zone,
 	return page;
 }
 
-noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+#if defined(CONFIG_FUNCTION_ERROR_INJECTION) || defined(CONFIG_FAIL_PAGE_ALLOC)
+DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+noinline
+#else
+static inline
+#endif
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return __should_fail_alloc_page(gfp_mask, order);
 }
-ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+ALLOW_ERROR_INJECTION_KEY(should_fail_alloc_page, TRUE, &should_fail_alloc_page_active);
+
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+	if (static_branch_unlikely(&should_fail_alloc_page_active))
+		return should_fail_alloc_page(gfp_mask, order);
+
+	return false;
+}
+#else
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+	return false;
+}
+#endif
 
 static inline long __zone_watermark_unusable_free(struct zone *z,
 				unsigned int order, unsigned int alloc_flags)
@@ -4430,7 +4454,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 
 	might_alloc(gfp_mask);
 
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (should_fail_alloc_page_wrapped(gfp_mask, order))
 		return false;
 
 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
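
For readers who have not seen the static-key-guarded fault injection
pattern before, below is a hypothetical userspace sketch of the shape the
patch creates. All names in it (fail_hook_active, should_fail(),
should_fail_wrapped()) and the "order > 3" failure rule are made up for
illustration; the plain flag plus __builtin_expect() merely stands in for a
real static key, which is flipped by patching the branch site at runtime,
so the disarmed case costs a NOP rather than the load-and-test shown here:

/* Userspace analogue of the should_fail_alloc_page_wrapped() pattern. */
#include <stdbool.h>
#include <stdio.h>

/* Stands in for DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active). */
static bool fail_hook_active;

/* Out-of-line slow path, like the noinline should_fail_alloc_page(). */
static bool __attribute__((noinline)) should_fail(unsigned int order)
{
	/* The kernel would consult fault_attr state here; this rule is fake. */
	return order > 3;
}

/* Hot-path wrapper: the slow-path call sits behind an expected-false branch. */
static inline bool should_fail_wrapped(unsigned int order)
{
	if (__builtin_expect(fail_hook_active, 0))
		return should_fail(order);

	return false;
}

int main(void)
{
	printf("disarmed, order 4: %d\n", should_fail_wrapped(4)); /* 0 */
	fail_hook_active = true; /* akin to static_branch_enable() */
	printf("armed, order 4:    %d\n", should_fail_wrapped(4)); /* 1 */
	return 0;
}

In the patch itself the wrapper is __always_inline, so the only code
emitted at the prepare_alloc_pages() callsite is the static key branch.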