Skip to content

Commit

Permalink
mm/rmap: convert RMAP flags to a proper distinct rmap_t type
Browse files Browse the repository at this point in the history
We want to pass the flags to more than one anon rmap function, getting
rid of special "do_page_add_anon_rmap()". So let's pass around a distinct
__bitwise type and refine documentation.

Signed-off-by: David Hildenbrand <david@redhat.com>
  • Loading branch information
davidhildenbrand committed Feb 25, 2022
1 parent 60db3e2 commit 1429b19
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 9 deletions.
22 changes: 18 additions & 4 deletions include/linux/rmap.h
Expand Up @@ -158,9 +158,23 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,

struct anon_vma *page_get_anon_vma(struct page *page);

/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;

/*
 * No special request: the page is mapped via a small (sub)page and is
 * possibly shared between processes.
 */
#define RMAP_NONE		((__force rmap_t)0)

/* The page is exclusive to a single process. */
#define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))

/*
 * The page is mapped via a compound page, not via a small subpage, and
 * should be accounted accordingly.
 */
#define RMAP_COMPOUND		((__force rmap_t)BIT(1))

/*
* rmap interfaces called when adding or removing pte of page
Expand All @@ -169,7 +183,7 @@ void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, int);
unsigned long, rmap_t);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
Expand Down
6 changes: 3 additions & 3 deletions mm/memory.c
Expand Up @@ -3515,10 +3515,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL, *swapcache;
struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
swp_entry_t entry;
pte_t pte;
int locked;
int exclusive = 0;
vm_fault_t ret = 0;
void *shadow = NULL;

Expand Down Expand Up @@ -3693,7 +3693,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
ret |= VM_FAULT_WRITE;
exclusive = RMAP_EXCLUSIVE;
rmap_flags |= RMAP_EXCLUSIVE;
}
flush_icache_page(vma, page);
if (pte_swp_soft_dirty(vmf->orig_pte))
Expand All @@ -3709,7 +3709,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
page_add_new_anon_rmap(page, vma, vmf->address, false);
lru_cache_add_inactive_or_unevictable(page, vma);
} else {
do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
do_page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
}

set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
Expand Down
5 changes: 3 additions & 2 deletions mm/rmap.c
Expand Up @@ -1139,7 +1139,8 @@ static void __page_check_anon_rmap(struct page *page,
/*
 * page_add_anon_rmap - add pte mapping to an anonymous page.
 * Convenience wrapper: translates the boolean @compound into the
 * __bitwise rmap_t flag type expected by do_page_add_anon_rmap().
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address,
			      compound ? RMAP_COMPOUND : RMAP_NONE);
}

/*
Expand All @@ -1148,7 +1149,7 @@ void page_add_anon_rmap(struct page *page,
* Everybody else should continue to use page_add_anon_rmap above.
*/
void do_page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int flags)
struct vm_area_struct *vma, unsigned long address, rmap_t flags)
{
bool compound = flags & RMAP_COMPOUND;
bool first;
Expand Down

0 comments on commit 1429b19

Please sign in to comment.