mm: split a folio in minimum folio order chunks
split_folio() and split_folio_to_list() assume order 0. To support a
minimum folio order we must expand these helpers to check the folio's
mapping order and use that instead.

In split_huge_page_to_list_to_order(), reject a new_order below the
minimum folio order, if one is set, so that we maintain the minimum
folio order requirement in the page cache.

Update the debugfs split interfaces used for testing to ensure the
minimum order is respected as well.
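
For illustration, the resulting call chain for a default split looks like
this (a sketch of the code below, assuming CONFIG_TRANSPARENT_HUGEPAGE=y;
not part of the commit itself):

/*
 * split_huge_page(page)
 *   -> split_folio(page_folio(page))
 *     -> split_folio_to_list(folio, NULL)
 *          min_order = folio_test_anon(folio) ?
 *                      0 : mapping_min_folio_order(folio->mapping);
 *       -> split_huge_page_to_list_to_order(&folio->page, NULL, min_order)
 */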

Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
mcgrof committed Apr 25, 2024
1 parent d4f700f · commit e77a2a4
Showing 2 changed files with 45 additions and 6 deletions.
12 changes: 8 additions & 4 deletions include/linux/huge_mm.h

@@ -87,6 +87,8 @@ extern struct kobj_attribute shmem_enabled_attr;
 #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
 	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
 
+#define split_folio(f) split_folio_to_list(f, NULL)
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define HPAGE_PMD_SHIFT PMD_SHIFT
 #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -267,9 +269,10 @@ void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
-	return split_huge_page_to_list_to_order(page, NULL, 0);
+	return split_folio(page_folio(page));
 }
 void deferred_split_folio(struct folio *folio);
 
@@ -432,6 +435,10 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	return 0;
+}
 static inline void deferred_split_folio(struct folio *folio) {}
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
@@ -532,9 +539,6 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
 	return split_folio_to_list_to_order(folio, NULL, new_order);
 }
 
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
 /*
  * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
  * limitations in the implementation like arm64 MTE can override this to
39 changes: 37 additions & 2 deletions mm/huge_memory.c
@@ -3035,6 +3035,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
  * Returns 0 if the hugepage is split successfully.
  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
  * us.
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set.
  */
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order)
@@ -3059,11 +3062,19 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		return -EBUSY;
 
 	if (!folio_test_anon(folio)) {
+		unsigned int min_order;
 		/* Truncated ? */
 		if (!folio->mapping) {
 			ret = -EBUSY;
 			goto out;
 		}
+		min_order = mapping_min_folio_order(folio->mapping);
+		if (new_order < min_order) {
+			VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+				     min_order);
+			ret = -EINVAL;
+			goto out;
+		}
 	} else if (new_order == 1) {
 		/* Cannot split anonymous THP to order-1 */
 		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3226,6 +3237,16 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	return ret;
 }
 
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	unsigned int min_order = 0;
+
+	if (!folio_test_anon(folio))
+		min_order = mapping_min_folio_order(folio->mapping);
+
+	return split_huge_page_to_list_to_order(&folio->page, list, min_order);
+}
+
 void folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
@@ -3488,6 +3509,15 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (new_order >= folio_order(folio))
 			goto next;
 
+		if (!folio_test_anon(folio)) {
+			unsigned int min_order = mapping_min_folio_order(folio->mapping);
+			if (min_order > new_order) {
+				pr_debug("cannot split below min_order: %u\n",
+					 min_order);
+				goto next;
+			}
+		}
+
 		total++;
 		/*
 		 * For folios with private, split_huge_page_to_list_to_order()
@@ -3528,6 +3558,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	pgoff_t index, fsize;
 	int nr_pages = 1;
 	unsigned long total = 0, split = 0;
+	unsigned int min_order;
 
 	file = getname_kernel(file_path);
 	if (IS_ERR(file))
@@ -3542,8 +3573,12 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	if (off_end > fsize)
 		off_end = fsize;
 
-	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
-		 file_path, off_start, off_end);
+	min_order = mapping_min_folio_order(mapping);
+	if (new_order < min_order)
+		new_order = min_order;
+
+	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx] with order: %u\n",
+		 file_path, off_start, off_end, new_order);
 
 	for (index = off_start; index < off_end; index += nr_pages) {
 		struct folio *folio = filemap_get_folio(mapping, index);
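
To summarize the resulting semantics, here is a minimal sketch
(illustrative only, not part of the commit; example_split() is a
hypothetical helper, the folio is assumed file-backed, locked, and
large, as split_huge_page_to_list_to_order() requires):

static int example_split(struct folio *folio, struct list_head *list)
{
	unsigned int min_order = mapping_min_folio_order(folio->mapping);

	/*
	 * Requesting an order below the mapping's minimum is now rejected
	 * with -EINVAL (plus a one-time warning), so a caller that wants
	 * the smallest legal split passes min_order explicitly...
	 */
	if (min_order > 0)
		return split_huge_page_to_list_to_order(&folio->page, list,
							min_order);

	/* ...or simply lets split_folio_to_list() pick min_order itself. */
	return split_folio_to_list(folio, list);
}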
