diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 0d0fa94dc1c83..1a782042ae802 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -96,8 +96,9 @@ static inline void fault_config_init(struct fault_config *config,
 
 struct kmem_cache;
 
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
-
+#endif
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
 #else
diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c
index b1b09cce93943..0906b76d78e88 100644
--- a/mm/fail_page_alloc.c
+++ b/mm/fail_page_alloc.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fault-inject.h>
 #include <linux/mm.h>
+#include "internal.h"
 
 static struct {
 	struct fault_attr attr;
@@ -9,7 +10,7 @@ static struct {
 	bool ignore_gfp_reclaim;
 	u32 min_order;
 } fail_page_alloc = {
-	.attr = FAULT_ATTR_INITIALIZER,
+	.attr = FAULT_ATTR_INITIALIZER_KEY(&should_fail_alloc_page_active.key),
 	.ignore_gfp_reclaim = true,
 	.ignore_gfp_highmem = true,
 	.min_order = 1,
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e7..8539e39b02e64 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -410,6 +410,8 @@ extern char * const zone_names[MAX_NR_ZONES];
 /* perform sanity checks on struct pages being allocated or freed */
 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+DECLARE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
 extern int min_free_kbytes;
 
 void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e22ce5675ca1..b6e246acb4aa7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3008,11 +3008,35 @@ struct page *rmqueue(struct zone *preferred_zone,
 	return page;
 }
 
-noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+#if defined(CONFIG_FUNCTION_ERROR_INJECTION) || defined(CONFIG_FAIL_PAGE_ALLOC)
+DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+noinline
+#else
+static inline
+#endif
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return __should_fail_alloc_page(gfp_mask, order);
 }
-ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+ALLOW_ERROR_INJECTION_KEY(should_fail_alloc_page, TRUE, &should_fail_alloc_page_active);
+
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+	if (static_branch_unlikely(&should_fail_alloc_page_active))
+		return should_fail_alloc_page(gfp_mask, order);
+
+	return false;
+}
+#else
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+	return false;
+}
+#endif
 
 static inline long __zone_watermark_unusable_free(struct zone *z,
 		unsigned int order, unsigned int alloc_flags)
@@ -4430,7 +4454,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 
 	might_alloc(gfp_mask);
 
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (should_fail_alloc_page_wrapped(gfp_mask, order))
 		return false;
 
 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
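
Note (not part of the patch): the shape the change gives the allocation fast path is an always-inlined wrapper that tests a cheap guard first and only then calls the out-of-line should_fail_alloc_page() hook. The sketch below is an illustrative user-space approximation of that pattern, not kernel code: a plain bool stands in for the static key (which the kernel patches into the instruction stream at runtime), and should_fail()/should_fail_wrapped() are hypothetical names chosen for the demo.

/* Minimal user-space sketch of the guard-plus-out-of-line-hook pattern. */
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for should_fail_alloc_page_active; in the kernel this is a
 * static key, so the disabled case is a patched-out branch, not a load.
 */
static bool should_fail_active;

/* Out-of-line hook, kept noinline like should_fail_alloc_page(). */
static bool __attribute__((noinline)) should_fail(unsigned int order)
{
	return order == 0;	/* arbitrary failure policy for the demo */
}

/* Always-inlined wrapper: callers only pay for the guard test. */
static inline bool should_fail_wrapped(unsigned int order)
{
	if (should_fail_active)
		return should_fail(order);

	return false;
}

int main(void)
{
	printf("guard off: %d\n", should_fail_wrapped(0));	/* hook skipped */
	should_fail_active = true;
	printf("guard on:  %d\n", should_fail_wrapped(0));	/* hook called */
	return 0;
}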