mm/damon: Add 'damon_region' NUMA fault simulation support
This development follows the NUMA balancing code: sampled mappings are
changed to PAGE_NONE so that the next access triggers a page fault, and
in do_numa_page() the per-'damon_region' NUMA local and remote counts
are updated.

Signed-off-by: Xin Hao <xhao@linux.alibaba.com>
Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
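
Note: the counting side referenced in the message (do_numa_page()) is not among the four files in this diff. Purely as an illustration, a minimal C sketch of what that per-region accounting could look like follows; the 'local'/'remote' fields on struct damon_region and the damon_numa_fault() helper are assumptions for the sketch, not code from this commit.

#include <linux/damon.h>	/* struct damon_region (assumed extended with local/remote counters) */

/*
 * Illustrative sketch only -- not part of this commit. Assumes the NUMA
 * hinting fault path (e.g. do_numa_page()) can look up the damon_region
 * covering the faulting address and then calls this helper with the node
 * holding the page and the node of the task that faulted.
 */
static void damon_numa_fault(int page_nid, int access_nid,
			     struct damon_region *r)
{
	if (!r)
		return;

	if (page_nid == access_nid)
		r->local++;	/* access came from the node that holds the page */
	else
		r->remote++;	/* cross-node access */
}

Once __damon_pa_mk_set() or damon_va_pmd_entry() below has set a sampled mapping to PAGE_NONE, the next touch raises such a fault, and a hook of this kind would classify it as node-local or remote.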
Xin Hao authored and intel-lab-lkp committed Feb 16, 2022
1 parent b811e8a commit a771208
Showing 4 changed files with 89 additions and 13 deletions.
23 changes: 19 additions & 4 deletions mm/damon/paddr.c
@@ -16,9 +16,10 @@
 #include "../internal.h"
 #include "prmtv-common.h"

-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_mk_set(struct page *page, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
+	bool result = false;
 	struct page_vma_mapped_walk pvmw = {
 		.page = page,
 		.vma = vma,
@@ -27,10 +28,24 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,

 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
-		if (pvmw.pte)
+		if (pvmw.pte) {
 			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
-		else
+			if (nr_online_nodes > 1) {
+				result = damon_ptep_mknone(pvmw.pte, vma, addr);
+				if (result)
+					flush_tlb_page(vma, addr);
+			}
+		} else {
 			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
+			if (nr_online_nodes > 1) {
+				result = damon_pmdp_mknone(pvmw.pmd, vma, addr);
+				if (result) {
+					unsigned long haddr = addr & HPAGE_PMD_MASK;
+
+					flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+				}
+			}
+		}
 	}
 	return true;
 }
@@ -39,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
 {
 	struct page *page = damon_get_page(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
-		.rmap_one = __damon_pa_mkold,
+		.rmap_one = __damon_pa_mk_set,
 		.anon_lock = page_lock_anon_vma_read,
 	};
 	bool need_lock;
44 changes: 44 additions & 0 deletions mm/damon/prmtv-common.c
@@ -12,6 +12,50 @@

 #include "prmtv-common.h"

+bool damon_ptep_mknone(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
+{
+	pte_t oldpte, ptent;
+	bool preserve_write;
+
+	oldpte = *pte;
+	if (pte_protnone(oldpte))
+		return false;
+
+	if (pte_present(oldpte)) {
+		preserve_write = pte_write(oldpte);
+		oldpte = ptep_modify_prot_start(vma, addr, pte);
+		ptent = pte_modify(oldpte, PAGE_NONE);
+
+		if (preserve_write)
+			ptent = pte_mk_savedwrite(ptent);
+
+		ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
+		return true;
+	}
+	return false;
+}
+
+bool damon_pmdp_mknone(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
+{
+	bool preserve_write;
+	pmd_t entry = *pmd;
+
+	if (is_huge_zero_pmd(entry) || pmd_protnone(entry))
+		return false;
+
+	if (pmd_present(entry)) {
+		preserve_write = pmd_write(entry);
+		entry = pmdp_invalidate(vma, addr, pmd);
+		entry = pmd_modify(entry, PAGE_NONE);
+		if (preserve_write)
+			entry = pmd_mk_savedwrite(entry);
+
+		set_pmd_at(vma->vm_mm, addr, pmd, entry);
+		return true;
+	}
+	return false;
+}
+
 /*
  * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
  * NULL.
3 changes: 3 additions & 0 deletions mm/damon/prmtv-common.h
@@ -7,6 +7,9 @@

 #include <linux/damon.h>

+bool damon_ptep_mknone(pte_t *pte, struct vm_area_struct *vma, unsigned long addr);
+bool damon_pmdp_mknone(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr);
+
 struct page *damon_get_page(unsigned long pfn);

 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
32 changes: 23 additions & 9 deletions mm/damon/vaddr.c
@@ -367,17 +367,25 @@ static void damon_va_update(struct damon_ctx *ctx)
 	}
 }

-static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+static int damon_va_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
+	bool result = false;
 	pte_t *pte;
 	spinlock_t *ptl;

 	if (pmd_huge(*pmd)) {
 		ptl = pmd_lock(walk->mm, pmd);
 		if (pmd_huge(*pmd)) {
 			damon_pmdp_mkold(pmd, walk->mm, addr);
+			if (nr_online_nodes > 1)
+				result = damon_pmdp_mknone(pmd, walk->vma, addr);
 			spin_unlock(ptl);
+			if (result) {
+				unsigned long haddr = addr & HPAGE_PMD_MASK;
+
+				flush_tlb_range(walk->vma, haddr, haddr + HPAGE_PMD_SIZE);
+			}
 			return 0;
 		}
 		spin_unlock(ptl);
@@ -386,11 +394,17 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		return 0;
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte_present(*pte))
-		goto out;
+	if (!pte_present(*pte)) {
+		pte_unmap_unlock(pte, ptl);
+		return 0;
+	}
 	damon_ptep_mkold(pte, walk->mm, addr);
-out:
+	if (nr_online_nodes > 1)
+		result = damon_ptep_mknone(pte, walk->vma, addr);
 	pte_unmap_unlock(pte, ptl);
+	if (result)
+		flush_tlb_page(walk->vma, addr);
+
 	return 0;
 }

@@ -450,15 +464,15 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define damon_mkold_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */

-static const struct mm_walk_ops damon_mkold_ops = {
-	.pmd_entry = damon_mkold_pmd_entry,
+static const struct mm_walk_ops damon_va_ops = {
+	.pmd_entry = damon_va_pmd_entry,
 	.hugetlb_entry = damon_mkold_hugetlb_entry,
 };

-static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
+static void damon_va_check(struct mm_struct *mm, unsigned long addr)
 {
 	mmap_read_lock(mm);
-	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
+	walk_page_range(mm, addr, addr + 1, &damon_va_ops, NULL);
 	mmap_read_unlock(mm);
 }

@@ -471,7 +485,7 @@ static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
 {
 	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

-	damon_va_mkold(mm, r->sampling_addr);
+	damon_va_check(mm, r->sampling_addr);
 }

 static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
