Skip to content
Permalink
Browse files
mm: Start tracking VMAs with maple tree
Start tracking the VMAs with the new maple tree structure in parallel
with the rb_tree.  Add debug and trace events for maple tree operations
and duplicate the rb_tree that is created on forks into the maple tree.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
  • Loading branch information
howlett authored and intel-lab-lkp committed Dec 10, 2020
1 parent d79b3ea commit 991a17ff32a6bc9549655cd0aea43c386fdea1c6
Show file tree
Hide file tree
Showing 9 changed files with 336 additions and 3 deletions.
@@ -90,6 +90,7 @@ void __init tboot_probe(void)
static pgd_t *tboot_pg_dir;
static struct mm_struct tboot_mm = {
.mm_rb = RB_ROOT,
.mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
@@ -55,6 +55,7 @@ static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
MMAP_LOCK_INITIALIZER(efi_mm)
@@ -2467,6 +2467,8 @@ extern bool arch_has_descending_max_zone_pfns(void);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* maple_tree */
void vma_store(struct mm_struct *mm, struct vm_area_struct *vma);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -388,6 +389,7 @@ struct kioctx_table;
struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct maple_tree mm_mt;
struct rb_root mm_rb;
u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
@@ -42,6 +42,103 @@ TRACE_EVENT(vm_unmapped_area,
__entry->low_limit, __entry->high_limit, __entry->align_mask,
__entry->align_offset)
);
/*
 * vma_mt_erase - trace removal of a VMA from the maple tree.
 *
 * Captures the owning mm, the vma pointer, and the range the vma
 * occupied.  vm_end is exclusive in the VMA, so the last covered
 * address (vm_end - 1) is recorded instead.
 */
TRACE_EVENT(vma_mt_erase,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),

	TP_ARGS(mm, vma),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(struct vm_area_struct *, vma)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
	),

	TP_fast_assign(
		__entry->mm = mm;
		__entry->vma = vma;
		__entry->vm_start = vma->vm_start;
		/* Record the inclusive last address, not the exclusive end. */
		__entry->vm_end = vma->vm_end - 1;
	),

	TP_printk("mt_mod %px, (%px), ERASE, %lu, %lu,",
		  __entry->mm, __entry->vma,
		  __entry->vm_start, __entry->vm_end
	)
);

/*
 * vma_mt_szero - trace storing of a NULL (zero) entry over a range
 * in the maple tree.  Only the mm and the affected range are logged;
 * there is no vma, hence the literal "(NULL)" in the output.  The
 * end is recorded as the inclusive last address (end - 1).
 */
TRACE_EVENT(vma_mt_szero,

	TP_PROTO(struct mm_struct *mm, unsigned long start,
		 unsigned long end),

	TP_ARGS(mm, start, end),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(unsigned long, start)
		__field(unsigned long, end)
	),

	TP_fast_assign(
		__entry->mm = mm;
		__entry->start = start;
		/* Record the inclusive last address, not the exclusive end. */
		__entry->end = end - 1;
	),

	TP_printk("mt_mod %px, (NULL), SNULL, %lu, %lu,",
		  __entry->mm,
		  __entry->start, __entry->end
	)
);

/*
 * vma_mt_store - trace insertion/update of a VMA in the maple tree.
 *
 * Mirrors vma_mt_erase: logs the owning mm, the vma pointer, and the
 * range being stored, with the inclusive last address (vm_end - 1).
 */
TRACE_EVENT(vma_mt_store,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),

	TP_ARGS(mm, vma),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(struct vm_area_struct *, vma)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
	),

	TP_fast_assign(
		__entry->mm = mm;
		__entry->vma = vma;
		__entry->vm_start = vma->vm_start;
		/* Record the inclusive last address, not the exclusive end. */
		__entry->vm_end = vma->vm_end - 1;
	),

	TP_printk("mt_mod %px, (%px), STORE, %lu, %lu,",
		  __entry->mm, __entry->vma,
		  __entry->vm_start, __entry->vm_end
	)
);


/*
 * exit_mmap - trace teardown of an mm's address space (maple tree
 * destruction on process exit).  Only the mm pointer is recorded.
 *
 * Fix: dropped the trailing "\n" from the TP_printk() format.  The
 * tracing output layer appends its own newline; embedding one in the
 * format string produces spurious blank lines in the trace and is
 * flagged by checkpatch.
 */
TRACE_EVENT(exit_mmap,
	TP_PROTO(struct mm_struct *mm),

	TP_ARGS(mm),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
	),

	TP_fast_assign(
		__entry->mm = mm;
	),

	TP_printk("mt_mod %px, DESTROY",
		  __entry->mm
	)
);

#endif

/* This part must be outside protection */
@@ -114,6 +114,7 @@ static int kernel_init(void *);

extern void init_IRQ(void);
extern void radix_tree_init(void);
extern void maple_tree_init(void);

/*
* Debug helper: via this flag we know that we are in 'early bootup code'
@@ -913,6 +914,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
radix_tree_init();
maple_tree_init();

/*
* Set up housekeeping before setting up workqueues to allow the unbound
@@ -588,6 +588,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;

/* Link the vma into the MT */
vma_store(mm, tmp);

mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
retval = copy_page_range(tmp, mpnt);
@@ -1004,6 +1007,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>
@@ -28,6 +29,7 @@
*/
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,
.mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),

0 comments on commit 991a17f

Please sign in to comment.