Commit 8846d96

riteshharjani authored and mpe committed

book3s64/hash: Early detect debug_pagealloc size requirement
Add a hash_supports_debug_pagealloc() helper to detect whether debug_pagealloc can be supported on hash or not. This checks both whether the debug_pagealloc config is enabled and whether the linear map can fit within the rma_size/4 region. The helper can then be used early during htab_init_page_sizes() to decide the linear map pagesize when hash supports either debug_pagealloc or kfence.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/c33c6691b2a2cf619cc74ac100118ca4dbf21a48.1729271995.git.ritesh.list@gmail.com
1 parent 76b7d64

1 file changed

arch/powerpc/mm/book3s64/hash_utils.c

Lines changed: 13 additions & 12 deletions
@@ -329,25 +329,26 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
 }
 #endif
 
+static inline bool hash_supports_debug_pagealloc(void)
+{
+        unsigned long max_hash_count = ppc64_rma_size / 4;
+        unsigned long linear_map_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+
+        if (!debug_pagealloc_enabled() || linear_map_count > max_hash_count)
+                return false;
+        return true;
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 static void hash_debug_pagealloc_alloc_slots(void)
 {
-        unsigned long max_hash_count = ppc64_rma_size / 4;
-
-        if (!debug_pagealloc_enabled())
-                return;
-        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-        if (unlikely(linear_map_hash_count > max_hash_count)) {
-                pr_info("linear map size (%llu) greater than 4 times RMA region (%llu). Disabling debug_pagealloc\n",
-                        ((u64)linear_map_hash_count << PAGE_SHIFT),
-                        ppc64_rma_size);
-                linear_map_hash_count = 0;
+        if (!hash_supports_debug_pagealloc())
                 return;
-        }
 
+        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
         linear_map_hash_slots = memblock_alloc_try_nid(
                         linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
                         ppc64_rma_size, NUMA_NO_NODE);
@@ -1076,7 +1077,7 @@ static void __init htab_init_page_sizes(void)
         bool aligned = true;
         init_hpte_page_sizes();
 
-        if (!debug_pagealloc_enabled() && !kfence_early_init_enabled()) {
+        if (!hash_supports_debug_pagealloc() && !kfence_early_init_enabled()) {
                 /*
                  * Pick a size for the linear mapping. Currently, we only
                  * support 16M, 1M and 4K which is the default
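
For illustration, below is a minimal userspace sketch (not kernel code) of the size check that hash_supports_debug_pagealloc() performs: one u8 hash-slot entry is kept per linear-map page, and that array must not exceed a quarter of the RMA region. The DRAM size, RMA size, and page shift are assumed example values; in the kernel they come from memblock_end_of_DRAM(), ppc64_rma_size, and PAGE_SHIFT.

/*
 * Standalone sketch of the check in hash_supports_debug_pagealloc().
 * All values below are assumptions chosen only for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT      16                      /* assumed 64K pages */

int main(void)
{
        unsigned long dram_end = 64UL << 30;            /* assumed 64G of RAM */
        unsigned long rma_size = 768UL << 20;           /* assumed 768M RMA */

        /* One u8 hash-slot entry per linear-map page ... */
        unsigned long linear_map_count = dram_end >> EXAMPLE_PAGE_SHIFT;
        /* ... which must fit within a quarter of the RMA region. */
        unsigned long max_hash_count = rma_size / 4;

        bool supported = linear_map_count <= max_hash_count;

        printf("linear_map_count=%lu max_hash_count=%lu -> debug_pagealloc %s\n",
               linear_map_count, max_hash_count,
               supported ? "supported" : "not supported");
        return 0;
}

With these assumed values (64G of RAM, 64K pages, 768M RMA), the slot array needs about 1M entries against a limit of roughly 192M, so debug_pagealloc would be supported and the linear map kept at the small page size.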
