Skip to content

Commit 79359d6

Browse files
mjkravetz authored and akpm00 committed
hugetlb: perform vmemmap optimization on a list of pages
When adding hugetlb pages to the pool, we first create a list of the allocated pages before adding to the pool. Pass this list of pages to a new routine hugetlb_vmemmap_optimize_folios() for vmemmap optimization. Due to significant differences in vmemmmap initialization for bootmem allocated hugetlb pages, a new routine prep_and_add_bootmem_folios is created. We also modify the routine vmemmap_should_optimize() to check for pages that are already optimized. There are code paths that might request vmemmap optimization twice and we want to make sure this is not attempted. Link: https://lkml.kernel.org/r/20231019023113.345257-4-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Reviewed-by: Muchun Song <songmuchun@bytedance.com> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: James Houghton <jthoughton@google.com> Cc: Joao Martins <joao.m.martins@oracle.com> Cc: Konrad Dybcio <konradybcio@kernel.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Usama Arif <usama.arif@bytedance.com> Cc: Xiongchun Duan <duanxiongchun@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent d67e32f commit 79359d6

File tree

3 files changed

+51
-8
lines changed

3 files changed

+51
-8
lines changed

mm/hugetlb.c

Lines changed: 35 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2282,6 +2282,9 @@ static void prep_and_add_allocated_folios(struct hstate *h,
22822282
unsigned long flags;
22832283
struct folio *folio, *tmp_f;
22842284

2285+
/* Send list for bulk vmemmap optimization processing */
2286+
hugetlb_vmemmap_optimize_folios(h, folio_list);
2287+
22852288
/* Add all new pool pages to free lists in one lock cycle */
22862289
spin_lock_irqsave(&hugetlb_lock, flags);
22872290
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
@@ -3344,6 +3347,35 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
33443347
prep_compound_head((struct page *)folio, huge_page_order(h));
33453348
}
33463349

3350+
static void __init prep_and_add_bootmem_folios(struct hstate *h,
3351+
struct list_head *folio_list)
3352+
{
3353+
unsigned long flags;
3354+
struct folio *folio, *tmp_f;
3355+
3356+
/* Send list for bulk vmemmap optimization processing */
3357+
hugetlb_vmemmap_optimize_folios(h, folio_list);
3358+
3359+
/* Add all new pool pages to free lists in one lock cycle */
3360+
spin_lock_irqsave(&hugetlb_lock, flags);
3361+
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3362+
if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3363+
/*
3364+
* If HVO fails, initialize all tail struct pages
3365+
* We do not worry about potential long lock hold
3366+
* time as this is early in boot and there should
3367+
* be no contention.
3368+
*/
3369+
hugetlb_folio_init_tail_vmemmap(folio,
3370+
HUGETLB_VMEMMAP_RESERVE_PAGES,
3371+
pages_per_huge_page(h));
3372+
}
3373+
__prep_account_new_huge_page(h, folio_nid(folio));
3374+
enqueue_hugetlb_folio(h, folio);
3375+
}
3376+
spin_unlock_irqrestore(&hugetlb_lock, flags);
3377+
}
3378+
33473379
/*
33483380
* Put bootmem huge pages into the standard lists after mem_map is up.
33493381
* Note: This only applies to gigantic (order > MAX_ORDER) pages.
@@ -3364,20 +3396,15 @@ static void __init gather_bootmem_prealloc(void)
33643396
* in this list. If so, process each size separately.
33653397
*/
33663398
if (h != prev_h && prev_h != NULL)
3367-
prep_and_add_allocated_folios(prev_h, &folio_list);
3399+
prep_and_add_bootmem_folios(prev_h, &folio_list);
33683400
prev_h = h;
33693401

33703402
VM_BUG_ON(!hstate_is_gigantic(h));
33713403
WARN_ON(folio_ref_count(folio) != 1);
33723404

33733405
hugetlb_folio_init_vmemmap(folio, h,
33743406
HUGETLB_VMEMMAP_RESERVE_PAGES);
3375-
__prep_new_hugetlb_folio(h, folio);
3376-
/* If HVO fails, initialize all tail struct pages */
3377-
if (!HPageVmemmapOptimized(&folio->page))
3378-
hugetlb_folio_init_tail_vmemmap(folio,
3379-
HUGETLB_VMEMMAP_RESERVE_PAGES,
3380-
pages_per_huge_page(h));
3407+
init_new_hugetlb_folio(h, folio);
33813408
list_add(&folio->lru, &folio_list);
33823409

33833410
/*
@@ -3389,7 +3416,7 @@ static void __init gather_bootmem_prealloc(void)
33893416
cond_resched();
33903417
}
33913418

3392-
prep_and_add_allocated_folios(h, &folio_list);
3419+
prep_and_add_bootmem_folios(h, &folio_list);
33933420
}
33943421

33953422
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)

mm/hugetlb_vmemmap.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -483,6 +483,9 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
483483
/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
484484
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
485485
{
486+
if (HPageVmemmapOptimized((struct page *)head))
487+
return false;
488+
486489
if (!READ_ONCE(vmemmap_optimize_enabled))
487490
return false;
488491

@@ -572,6 +575,14 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
572575
SetHPageVmemmapOptimized(head);
573576
}
574577

578+
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
579+
{
580+
struct folio *folio;
581+
582+
list_for_each_entry(folio, folio_list, lru)
583+
hugetlb_vmemmap_optimize(h, &folio->page);
584+
}
585+
575586
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
576587
{
577588
.procname = "hugetlb_optimize_vmemmap",

mm/hugetlb_vmemmap.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
2121
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
2222
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
23+
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
2324

2425
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
2526
{
@@ -48,6 +49,10 @@ static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page
4849
{
4950
}
5051

52+
static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
53+
{
54+
}
55+
5156
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
5257
{
5358
return 0;

0 commit comments

Comments (0)