Skip to content

Commit d2d243d

Browse files
Baolin Wang authored and akpm00 committed
mm: shmem: fix khugepaged activation policy for shmem
Shmem has a separate interface (different from anonymous pages) to control
huge page allocation; that means shmem THP can be enabled while anonymous
THP is disabled. However, in this case, khugepaged will not start to
collapse shmem THP, which is unreasonable.

To fix this issue, we should call start_stop_khugepaged() to activate or
deactivate the khugepaged thread when setting shmem mTHP interfaces.
Moreover, add a new helper shmem_hpage_pmd_enabled() to help to check
whether shmem THP is enabled, which will determine if khugepaged should be
activated.

Link: https://lkml.kernel.org/r/9b9c6cbc4499bf44c6455367fd9e0f6036525680.1726978977.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reported-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent f8f55e9 commit d2d243d

File tree

3 files changed

+38
-3
lines changed

3 files changed

+38
-3
lines changed

include/linux/shmem_fs.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,13 +114,19 @@ int shmem_unuse(unsigned int type);
114114
unsigned long shmem_allowable_huge_orders(struct inode *inode,
115115
struct vm_area_struct *vma, pgoff_t index,
116116
loff_t write_end, bool shmem_huge_force);
117+
bool shmem_hpage_pmd_enabled(void);
117118
#else
118119
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
119120
struct vm_area_struct *vma, pgoff_t index,
120121
loff_t write_end, bool shmem_huge_force)
121122
{
122123
return 0;
123124
}
125+
126+
static inline bool shmem_hpage_pmd_enabled(void)
127+
{
128+
return false;
129+
}
124130
#endif
125131

126132
#ifdef CONFIG_SHMEM

mm/khugepaged.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -416,9 +416,11 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
416416
static bool hugepage_pmd_enabled(void)
417417
{
418418
/*
419-
* We cover both the anon and the file-backed case here; file-backed
419+
* We cover the anon, shmem and the file-backed case here; file-backed
420420
* hugepages, when configured in, are determined by the global control.
421421
* Anon pmd-sized hugepages are determined by the pmd-size control.
422+
* Shmem pmd-sized hugepages are also determined by its pmd-size control,
423+
* except when the global shmem_huge is set to SHMEM_HUGE_DENY.
422424
*/
423425
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
424426
hugepage_global_enabled())
@@ -430,6 +432,8 @@ static bool hugepage_pmd_enabled(void)
430432
if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
431433
hugepage_global_enabled())
432434
return true;
435+
if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
436+
return true;
433437
return false;
434438
}
435439

mm/shmem.c

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1655,6 +1655,23 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
16551655
}
16561656

16571657
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1658+
bool shmem_hpage_pmd_enabled(void)
1659+
{
1660+
if (shmem_huge == SHMEM_HUGE_DENY)
1661+
return false;
1662+
if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1663+
return true;
1664+
if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1665+
return true;
1666+
if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1667+
return true;
1668+
if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1669+
shmem_huge != SHMEM_HUGE_NEVER)
1670+
return true;
1671+
1672+
return false;
1673+
}
1674+
16581675
unsigned long shmem_allowable_huge_orders(struct inode *inode,
16591676
struct vm_area_struct *vma, pgoff_t index,
16601677
loff_t write_end, bool shmem_huge_force)
@@ -5024,7 +5041,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
50245041
struct kobj_attribute *attr, const char *buf, size_t count)
50255042
{
50265043
char tmp[16];
5027-
int huge;
5044+
int huge, err;
50285045

50295046
if (count + 1 > sizeof(tmp))
50305047
return -EINVAL;
@@ -5048,7 +5065,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
50485065
shmem_huge = huge;
50495066
if (shmem_huge > SHMEM_HUGE_DENY)
50505067
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5051-
return count;
5068+
5069+
err = start_stop_khugepaged();
5070+
return err ? err : count;
50525071
}
50535072

50545073
struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
@@ -5125,6 +5144,12 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
51255144
ret = -EINVAL;
51265145
}
51275146

5147+
if (ret > 0) {
5148+
int err = start_stop_khugepaged();
5149+
5150+
if (err)
5151+
ret = err;
5152+
}
51285153
return ret;
51295154
}
51305155

0 commit comments

Comments
 (0)