Skip to content

Commit f39af05

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: add VMA iterator
This thin layer of abstraction over the maple tree state is for iterating over VMAs. You can go forwards, go backwards or ask where the iterator is. Rename the existing vma_next() to __vma_next() -- it will be removed by the end of this series.

Link: https://lkml.kernel.org/r/20220906194824.2110408-10-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent d4af56c commit f39af05

File tree

3 files changed

+58
-5
lines changed

3 files changed

+58
-5
lines changed

include/linux/mm.h

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -661,6 +661,38 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
661661
return vma->vm_flags & VM_ACCESS_FLAGS;
662662
}
663663

664+
static inline
665+
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
666+
{
667+
return mas_find(&vmi->mas, max);
668+
}
669+
670+
/*
 * vma_next() - Get the next VMA from the iterator.
 * @vmi: The VMA iterator.
 *
 * Returns: The next VMA, or NULL when the end of the VMA tree is reached.
 */
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses vma_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return vma_find(vmi, ULONG_MAX);
}
678+
679+
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
680+
{
681+
return mas_prev(&vmi->mas, 0);
682+
}
683+
684+
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
685+
{
686+
return vmi->mas.index;
687+
}
688+
689+
/*
 * for_each_vma() - Iterate over every remaining VMA.
 * @__vmi: The VMA iterator (struct vma_iterator).
 * @__vma: struct vm_area_struct * loop cursor; NULL when the loop ends.
 */
#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/*
 * for_each_vma_range() - Iterate over VMAs below @__end.
 * @__vmi: The VMA iterator.
 * @__vma: struct vm_area_struct * loop cursor; NULL when the loop ends.
 * @__end: Exclusive upper bound on the iteration.
 *
 * The MM code likes to work with exclusive end addresses, hence the
 * "- 1": vma_find() takes an inclusive maximum.
 */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
695+
664696
#ifdef CONFIG_SHMEM
665697
/*
666698
* The vma_is_shmem is not inline because it is used only by slow

include/linux/mm_types.h

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -777,6 +777,27 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
777777

778778
#endif /* CONFIG_LRU_GEN */
779779

780+
/*
 * struct vma_iterator - Iterator over the VMAs of an mm_struct.
 * @mas: The maple tree state backing the iteration.
 *
 * A thin wrapper around the maple tree state; initialize with
 * VMA_ITERATOR() or vma_iter_init().
 */
struct vma_iterator {
	struct ma_state mas;
};
783+
784+
/*
 * VMA_ITERATOR() - Declare and statically initialize a VMA iterator.
 * @name:   Name of the struct vma_iterator variable to declare.
 * @__mm:   The mm_struct whose VMA tree (mm_mt) will be iterated.
 * @__addr: Address at which iteration starts.
 *
 * Unnamed ma_state fields are zero-initialized; MAS_START marks the
 * state as not yet walked.
 */
#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = MAS_START,				\
		},							\
	}
792+
793+
/*
 * vma_iter_init() - (Re)initialize a VMA iterator at run time.
 * @vmi:  The iterator to initialize.
 * @mm:   The mm_struct whose VMA tree (mm_mt) will be iterated.
 * @addr: Address at which iteration starts.
 *
 * Run-time counterpart of the VMA_ITERATOR() static initializer.  Only
 * the tree, index and node fields are (re)set here; other ma_state
 * fields are left as-is.
 */
static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	vmi->mas.tree = &mm->mm_mt;
	vmi->mas.index = addr;
	vmi->mas.node = MAS_START;
}
800+
780801
struct mmu_gather;
781802
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
782803
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);

mm/mmap.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -586,15 +586,15 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
586586
}
587587

588588
/*
589-
* vma_next() - Get the next VMA.
589+
* __vma_next() - Get the next VMA.
590590
* @mm: The mm_struct.
591591
* @vma: The current vma.
592592
*
593593
* If @vma is NULL, return the first vma in the mm.
594594
*
595595
* Returns: The next VMA after @vma.
596596
*/
597-
static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
597+
static inline struct vm_area_struct *__vma_next(struct mm_struct *mm,
598598
struct vm_area_struct *vma)
599599
{
600600
if (!vma)
@@ -1291,7 +1291,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
12911291
if (vm_flags & VM_SPECIAL)
12921292
return NULL;
12931293

1294-
next = vma_next(mm, prev);
1294+
next = __vma_next(mm, prev);
12951295
area = next;
12961296
if (area && area->vm_end == end) /* cases 6, 7, 8 */
12971297
next = next->vm_next;
@@ -2843,7 +2843,7 @@ static void unmap_region(struct mm_struct *mm,
28432843
struct vm_area_struct *vma, struct vm_area_struct *prev,
28442844
unsigned long start, unsigned long end)
28452845
{
2846-
struct vm_area_struct *next = vma_next(mm, prev);
2846+
struct vm_area_struct *next = __vma_next(mm, prev);
28472847
struct mmu_gather tlb;
28482848

28492849
lru_add_drain();
@@ -3051,7 +3051,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
30513051
if (error)
30523052
goto split_failed;
30533053
}
3054-
vma = vma_next(mm, prev);
3054+
vma = __vma_next(mm, prev);
30553055

30563056
if (unlikely(uf)) {
30573057
/*

0 commit comments

Comments
 (0)