Skip to content

Commit 2e34687

Browse files
xzpeter authored and akpm00 committed
mm: remember young/dirty bit for page migrations
When page migration happens, we always ignore the young/dirty bit settings in the old pgtable, mark the page as old in the new page table using either pte_mkold() or pmd_mkold(), and keep the pte clean. That's fine functionally, but it's not friendly to page reclaim because the moving page can be actively accessed within the procedure. Not to mention that hardware setting the young bit can bring quite some overhead on some systems, e.g. x86_64 needs a few hundred nanoseconds to set the bit. The same slowdown problem applies to dirty bits when the memory is first written after page migration happened. Actually we can easily remember the A/D bit configuration and recover the information after the page is migrated. To achieve it, define a new set of bits in the migration swap offset field to cache the A/D bits of the old pte. Then when removing/recovering the migration entry, we can recover the A/D bits even if the page changed. One thing to mention is that here we use max_swapfile_size() to detect how many swp offset bits we have, and we'll only enable this feature if we know the swp offset is big enough to store both the PFN value and the A/D bits. Otherwise the A/D bits are dropped like before. Link: https://lkml.kernel.org/r/20220811161331.37055-6-peterx@redhat.com Signed-off-by: Peter Xu <peterx@redhat.com> Reviewed-by: "Huang, Ying" <ying.huang@intel.com> Cc: Alistair Popple <apopple@nvidia.com> Cc: Andi Kleen <andi.kleen@intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: David Hildenbrand <david@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: "Kirill A . Shutemov" <kirill@shutemov.name> Cc: Minchan Kim <minchan@kernel.org> Cc: Nadav Amit <nadav.amit@gmail.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Dave Hansen <dave.hansen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 0ccf7f1 commit 2e34687

File tree

5 files changed

+130
-4
lines changed

5 files changed

+130
-4
lines changed

include/linux/swapops.h

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@
88

99
#ifdef CONFIG_MMU
1010

11+
#ifdef CONFIG_SWAP
12+
#include <linux/swapfile.h>
13+
#endif /* CONFIG_SWAP */
14+
1115
/*
1216
* swapcache pages are stored in the swapper_space radix tree. We want to
1317
* get good packing density in that tree, so the index should be dense in
@@ -35,6 +39,31 @@
3539
#endif /* MAX_PHYSMEM_BITS */
3640
#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
3741

42+
/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type |     swp_offset     |
 *   |----------+--------+-+-+-------|
 *   |          |  resv  |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to
 * migration_entry_supports_ad() for more information.  If there are more
 * bits besides PFN and A/D bits, they should be reserved and always be
 * zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
66+
3867
static inline bool is_pfn_swap_entry(swp_entry_t entry);
3968

4069
/* Clear all flags but only keep swp_entry_t related information */
@@ -265,6 +294,57 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
265294
return swp_entry(SWP_MIGRATION_WRITE, offset);
266295
}
267296

297+
/*
298+
* Returns whether the host has large enough swap offset field to support
299+
* carrying over pgtable A/D bits for page migrations. The result is
300+
* pretty much arch specific.
301+
*/
302+
static inline bool migration_entry_supports_ad(void)
303+
{
304+
/*
305+
* max_swapfile_size() returns the max supported swp-offset plus 1.
306+
* We can support the migration A/D bits iff the pfn swap entry has
307+
* the offset large enough to cover all of them (PFN, A & D bits).
308+
*/
309+
#ifdef CONFIG_SWAP
310+
return max_swapfile_size() >= (1UL << SWP_MIG_TOTAL_BITS);
311+
#else /* CONFIG_SWAP */
312+
return false;
313+
#endif /* CONFIG_SWAP */
314+
}
315+
316+
/* Tag a migration entry with the young (accessed) bit, if the arch has room. */
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (!migration_entry_supports_ad())
		return entry;
	return swp_entry(swp_type(entry), swp_offset(entry) | SWP_MIG_YOUNG);
}
323+
324+
/* Was the migrated page young?  False when A/D caching is unsupported. */
static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (!migration_entry_supports_ad())
		/* Keep the old behavior of aging page after migration */
		return false;
	return swp_offset(entry) & SWP_MIG_YOUNG;
}
331+
332+
/* Tag a migration entry with the dirty bit, if the arch has room. */
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (!migration_entry_supports_ad())
		return entry;
	return swp_entry(swp_type(entry), swp_offset(entry) | SWP_MIG_DIRTY);
}
339+
340+
/* Was the migrated page dirty?  False when A/D caching is unsupported. */
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (!migration_entry_supports_ad())
		/* Keep the old behavior of clean page after migration */
		return false;
	return swp_offset(entry) & SWP_MIG_DIRTY;
}
347+
268348
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
269349
spinlock_t *ptl);
270350
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
@@ -311,6 +391,25 @@ static inline int is_readable_migration_entry(swp_entry_t entry)
311391
return 0;
312392
}
313393

394+
/* !CONFIG_MIGRATION stub: nothing to record, pass the entry through. */
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}
398+
399+
/* !CONFIG_MIGRATION stub: no cached A bit, treat the page as old. */
static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}
403+
404+
/* !CONFIG_MIGRATION stub: nothing to record, pass the entry through. */
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}
408+
409+
/* !CONFIG_MIGRATION stub: no cached D bit, treat the page as clean. */
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
314413
#endif /* CONFIG_MIGRATION */
315414

316415
typedef unsigned long pte_marker;

mm/huge_memory.c

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2121,7 +2121,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
21212121
write = is_writable_migration_entry(entry);
21222122
if (PageAnon(page))
21232123
anon_exclusive = is_readable_exclusive_migration_entry(entry);
2124-
young = false;
2124+
young = is_migration_entry_young(entry);
2125+
dirty = is_migration_entry_dirty(entry);
21252126
soft_dirty = pmd_swp_soft_dirty(old_pmd);
21262127
uffd_wp = pmd_swp_uffd_wp(old_pmd);
21272128
} else {
@@ -2183,6 +2184,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
21832184
else
21842185
swp_entry = make_readable_migration_entry(
21852186
page_to_pfn(page + i));
2187+
if (young)
2188+
swp_entry = make_migration_entry_young(swp_entry);
2189+
if (dirty)
2190+
swp_entry = make_migration_entry_dirty(swp_entry);
21862191
entry = swp_entry_to_pte(swp_entry);
21872192
if (soft_dirty)
21882193
entry = pte_swp_mksoft_dirty(entry);
@@ -3201,6 +3206,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
32013206
entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
32023207
else
32033208
entry = make_readable_migration_entry(page_to_pfn(page));
3209+
if (pmd_young(pmdval))
3210+
entry = make_migration_entry_young(entry);
3211+
if (pmd_dirty(pmdval))
3212+
entry = make_migration_entry_dirty(entry);
32043213
pmdswp = swp_entry_to_pmd(entry);
32053214
if (pmd_soft_dirty(pmdval))
32063215
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
@@ -3226,13 +3235,18 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
32263235

32273236
entry = pmd_to_swp_entry(*pvmw->pmd);
32283237
get_page(new);
3229-
pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)));
3238+
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
32303239
if (pmd_swp_soft_dirty(*pvmw->pmd))
32313240
pmde = pmd_mksoft_dirty(pmde);
32323241
if (is_writable_migration_entry(entry))
32333242
pmde = maybe_pmd_mkwrite(pmde, vma);
32343243
if (pmd_swp_uffd_wp(*pvmw->pmd))
32353244
pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
3245+
if (!is_migration_entry_young(entry))
3246+
pmde = pmd_mkold(pmde);
3247+
/* NOTE: this may contain setting soft-dirty on some archs */
3248+
if (PageDirty(new) && is_migration_entry_dirty(entry))
3249+
pmde = pmd_mkdirty(pmde);
32363250

32373251
if (PageAnon(new)) {
32383252
rmap_t rmap_flags = RMAP_COMPOUND;

mm/migrate.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -198,14 +198,18 @@ static bool remove_migration_pte(struct folio *folio,
198198
#endif
199199

200200
folio_get(folio);
201-
pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
201+
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
202202
if (pte_swp_soft_dirty(*pvmw.pte))
203203
pte = pte_mksoft_dirty(pte);
204204

205205
/*
206206
* Recheck VMA as permissions can change since migration started
207207
*/
208208
entry = pte_to_swp_entry(*pvmw.pte);
209+
if (!is_migration_entry_young(entry))
210+
pte = pte_mkold(pte);
211+
if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
212+
pte = pte_mkdirty(pte);
209213
if (is_writable_migration_entry(entry))
210214
pte = maybe_mkwrite(pte, vma);
211215
else if (pte_swp_uffd_wp(*pvmw.pte))

mm/migrate_device.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,12 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
233233
else
234234
entry = make_readable_migration_entry(
235235
page_to_pfn(page));
236+
if (pte_present(pte)) {
237+
if (pte_young(pte))
238+
entry = make_migration_entry_young(entry);
239+
if (pte_dirty(pte))
240+
entry = make_migration_entry_dirty(entry);
241+
}
236242
swp_pte = swp_entry_to_pte(entry);
237243
if (pte_present(pte)) {
238244
if (pte_soft_dirty(pte))

mm/rmap.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2066,7 +2066,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
20662066
else
20672067
entry = make_readable_migration_entry(
20682068
page_to_pfn(subpage));
2069-
2069+
if (pte_young(pteval))
2070+
entry = make_migration_entry_young(entry);
2071+
if (pte_dirty(pteval))
2072+
entry = make_migration_entry_dirty(entry);
20702073
swp_pte = swp_entry_to_pte(entry);
20712074
if (pte_soft_dirty(pteval))
20722075
swp_pte = pte_swp_mksoft_dirty(swp_pte);

0 commit comments

Comments
 (0)