
Commit b4c9ffb

RichardWeiYang authored and akpm00 committed
mm/khugepaged: remove definition of struct khugepaged_mm_slot
It is not correct to use mm_slot_entry() to get a struct khugepaged_mm_slot without first checking that mm_slot is not NULL. No problem has been reported, but only because slot happens to be the first element of struct khugepaged_mm_slot.

Since struct khugepaged_mm_slot is merely a wrapper around struct mm_slot, there is no need to define it. Remove the definition of struct khugepaged_mm_slot, so there is no chance to misuse mm_slot_entry().

[richard.weiyang@gmail.com: fix use-after-free crash]
Link: https://lkml.kernel.org/r/20250922002834.vz6ntj36e75ehkyp@master
Link: https://lkml.kernel.org/r/20250919071244.17020-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Kiryl Shutsemau <kirill@shutemov.name>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
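To see why the unchecked mm_slot_entry() only happened to work: mm_slot_entry() is essentially container_of(), which subtracts the member's offset from the pointer it is given. With slot as the first member the offset is 0, so a NULL lookup result stays NULL and the later NULL check still fires; had slot not been the first member, the same pattern would turn NULL into a bogus non-NULL pointer. A minimal standalone C sketch of that hazard follows; struct bad_wrapper is hypothetical and exists only for illustration (and doing pointer arithmetic on NULL is itself undefined behavior, which is part of the point):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's container_of()/mm_slot_entry(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mm_slot { int dummy; };

    /* Wrapper as in the old code: slot is the FIRST member, offset 0. */
    struct khugepaged_mm_slot { struct mm_slot slot; };

    /* Hypothetical wrapper where slot is NOT first: offset != 0. */
    struct bad_wrapper { long extra; struct mm_slot slot; };

    int main(void)
    {
            struct mm_slot *slot = NULL;    /* failed lookup */

            /* Offset 0: NULL - 0 == NULL, so a later NULL check still works. */
            struct khugepaged_mm_slot *ok =
                    container_of(slot, struct khugepaged_mm_slot, slot);

            /* Offset != 0: a bogus non-NULL pointer; the NULL check is defeated. */
            struct bad_wrapper *bad =
                    container_of(slot, struct bad_wrapper, slot);

            printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
            return 0;
    }

Removing the wrapper removes the mm_slot_entry() step entirely, so there is no conversion left to misuse.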
1 parent 08498be · commit b4c9ffb

File tree: 1 file changed, +21 −37 lines


mm/khugepaged.c

Lines changed: 21 additions & 37 deletions
@@ -103,14 +103,6 @@ struct collapse_control {
 	nodemask_t alloc_nmask;
 };
 
-/**
- * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
- * @slot: hash lookup from mm to mm_slot
- */
-struct khugepaged_mm_slot {
-	struct mm_slot slot;
-};
-
 /**
  * struct khugepaged_scan - cursor for scanning
  * @mm_head: the head of the mm list to scan
@@ -121,7 +113,7 @@ struct khugepaged_mm_slot {
  */
 struct khugepaged_scan {
 	struct list_head mm_head;
-	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *mm_slot;
 	unsigned long address;
 };
 
@@ -384,7 +376,10 @@ int hugepage_madvise(struct vm_area_struct *vma,
 
 int __init khugepaged_init(void)
 {
-	mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
+	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
+					  sizeof(struct mm_slot),
+					  __alignof__(struct mm_slot),
+					  0, NULL);
 	if (!mm_slot_cache)
 		return -ENOMEM;
 
@@ -438,7 +433,6 @@ static bool hugepage_pmd_enabled(void)
 
 void __khugepaged_enter(struct mm_struct *mm)
 {
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	int wakeup;
 
@@ -447,12 +441,10 @@ void __khugepaged_enter(struct mm_struct *mm)
 	if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
 		return;
 
-	mm_slot = mm_slot_alloc(mm_slot_cache);
-	if (!mm_slot)
+	slot = mm_slot_alloc(mm_slot_cache);
+	if (!slot)
 		return;
 
-	slot = &mm_slot->slot;
-
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot_insert(mm_slots_hash, mm, slot);
 	/*
@@ -480,14 +472,12 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 
 void __khugepaged_exit(struct mm_struct *mm)
 {
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	int free = 0;
 
 	spin_lock(&khugepaged_mm_lock);
 	slot = mm_slot_lookup(mm_slots_hash, mm);
-	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
-	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+	if (slot && khugepaged_scan.mm_slot != slot) {
 		hash_del(&slot->hash);
 		list_del(&slot->mm_node);
 		free = 1;
@@ -496,9 +486,9 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 	if (free) {
 		mm_flags_clear(MMF_VM_HUGEPAGE, mm);
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, slot);
 		mmdrop(mm);
-	} else if (mm_slot) {
+	} else if (slot) {
 		/*
 		 * This is required to serialize against
 		 * hpage_collapse_test_exit() (which is guaranteed to run
@@ -1432,9 +1422,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	return result;
 }
 
-static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
+static void collect_mm_slot(struct mm_slot *slot)
 {
-	struct mm_slot *slot = &mm_slot->slot;
 	struct mm_struct *mm = slot->mm;
 
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -1451,7 +1440,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 		 */
 
 		/* khugepaged_mm_lock actually not necessary for the below */
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, slot);
 		mmdrop(mm);
 	}
 }
@@ -2394,7 +2383,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	__acquires(&khugepaged_mm_lock)
 {
 	struct vma_iterator vmi;
-	struct khugepaged_mm_slot *mm_slot;
 	struct mm_slot *slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
@@ -2405,14 +2393,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	*result = SCAN_FAIL;
 
 	if (khugepaged_scan.mm_slot) {
-		mm_slot = khugepaged_scan.mm_slot;
-		slot = &mm_slot->slot;
+		slot = khugepaged_scan.mm_slot;
 	} else {
 		slot = list_first_entry(&khugepaged_scan.mm_head,
 					struct mm_slot, mm_node);
-		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 		khugepaged_scan.address = 0;
-		khugepaged_scan.mm_slot = mm_slot;
+		khugepaged_scan.mm_slot = slot;
 	}
 	spin_unlock(&khugepaged_mm_lock);
 
@@ -2510,7 +2496,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 breakouterloop_mmap_lock:
 
 	spin_lock(&khugepaged_mm_lock);
-	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+	VM_BUG_ON(khugepaged_scan.mm_slot != slot);
 	/*
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
@@ -2522,16 +2508,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	 * mm_slot not pointing to the exiting mm.
 	 */
 	if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
-		slot = list_next_entry(slot, mm_node);
-		khugepaged_scan.mm_slot =
-			mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
+		khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
 		khugepaged_scan.address = 0;
 	} else {
 		khugepaged_scan.mm_slot = NULL;
 		khugepaged_full_scans++;
 	}
 
-	collect_mm_slot(mm_slot);
+	collect_mm_slot(slot);
 }
 
 	return progress;
@@ -2618,7 +2602,7 @@ static void khugepaged_wait_work(void)
 
 static int khugepaged(void *none)
 {
-	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 
 	set_freezable();
 	set_user_nice(current, MAX_NICE);
@@ -2629,10 +2613,10 @@ static int khugepaged(void *none)
 	}
 
 	spin_lock(&khugepaged_mm_lock);
-	mm_slot = khugepaged_scan.mm_slot;
+	slot = khugepaged_scan.mm_slot;
 	khugepaged_scan.mm_slot = NULL;
-	if (mm_slot)
-		collect_mm_slot(mm_slot);
+	if (slot)
+		collect_mm_slot(slot);
 	spin_unlock(&khugepaged_mm_lock);
 	return 0;
 }
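A note on the khugepaged_init() hunk: KMEM_CACHE() derives the slab cache's name, size, and alignment from a struct type, and struct khugepaged_mm_slot no longer exists, so the patch open-codes kmem_cache_create() to keep the user-visible cache name "khugepaged_mm_slot" while sizing the cache by struct mm_slot. For reference, the helper macro expands roughly like this (simplified sketch of include/linux/slab.h):

    /* Simplified sketch of the kernel's KMEM_CACHE() helper macro: */
    #define KMEM_CACHE(__struct, __flags)                                   \
            kmem_cache_create(#__struct, sizeof(struct __struct),           \
                              __alignof__(struct __struct), (__flags), NULL)

    /*
     * So the removed line
     *         mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
     * expanded to a kmem_cache_create() call naming the cache after the
     * wrapper struct; the replacement keeps that name but passes
     * sizeof()/__alignof__() of struct mm_slot directly.
     */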
