Skip to content

Commit 68158bf

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: mass constification of folio/page pointers
Now that page_pgoff() takes const pointers, we can constify the pointers
to a lot of functions.

Link: https://lkml.kernel.org/r/20241005200121.3231142-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 713da0b commit 68158bf

File tree

7 files changed

+37
-30
lines changed

7 files changed

+37
-30
lines changed

include/linux/ksm.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
9090

9191
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
9292
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
93-
void collect_procs_ksm(struct folio *folio, struct page *page,
93+
void collect_procs_ksm(const struct folio *folio, const struct page *page,
9494
struct list_head *to_kill, int force_early);
9595
long ksm_process_profit(struct mm_struct *);
9696

@@ -122,8 +122,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
122122
{
123123
}
124124

125-
static inline void collect_procs_ksm(struct folio *folio, struct page *page,
126-
struct list_head *to_kill, int force_early)
125+
static inline void collect_procs_ksm(const struct folio *folio,
126+
const struct page *page, struct list_head *to_kill,
127+
int force_early)
127128
{
128129
}
129130

include/linux/rmap.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
171171
unlink_anon_vmas(next);
172172
}
173173

174-
struct anon_vma *folio_get_anon_vma(struct folio *folio);
174+
struct anon_vma *folio_get_anon_vma(const struct folio *folio);
175175

176176
/* RMAP flags, currently only relevant for some anon rmap operations. */
177177
typedef int __bitwise rmap_t;
@@ -194,8 +194,8 @@ enum rmap_level {
194194
RMAP_LEVEL_PMD,
195195
};
196196

197-
static inline void __folio_rmap_sanity_checks(struct folio *folio,
198-
struct page *page, int nr_pages, enum rmap_level level)
197+
static inline void __folio_rmap_sanity_checks(const struct folio *folio,
198+
const struct page *page, int nr_pages, enum rmap_level level)
199199
{
200200
/* hugetlb folios are handled separately. */
201201
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -771,14 +771,14 @@ struct rmap_walk_control {
771771
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
772772
unsigned long addr, void *arg);
773773
int (*done)(struct folio *folio);
774-
struct anon_vma *(*anon_lock)(struct folio *folio,
774+
struct anon_vma *(*anon_lock)(const struct folio *folio,
775775
struct rmap_walk_control *rwc);
776776
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
777777
};
778778

779779
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
780780
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
781-
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
781+
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
782782
struct rmap_walk_control *rwc);
783783

784784
#else /* !CONFIG_MMU */

mm/internal.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1117,10 +1117,11 @@ void ClearPageHWPoisonTakenOff(struct page *page);
11171117
bool take_page_off_buddy(struct page *page);
11181118
bool put_page_back_buddy(struct page *page);
11191119
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1120-
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
1120+
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
11211121
struct vm_area_struct *vma, struct list_head *to_kill,
11221122
unsigned long ksm_addr);
1123-
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
1123+
unsigned long page_mapped_in_vma(const struct page *page,
1124+
struct vm_area_struct *vma);
11241125

11251126
#else
11261127
static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)

mm/ksm.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1051,7 +1051,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
10511051
return err;
10521052
}
10531053

1054-
static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
1054+
static inline
1055+
struct ksm_stable_node *folio_stable_node(const struct folio *folio)
10551056
{
10561057
return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
10571058
}
@@ -3067,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
30673068
/*
30683069
* Collect processes when the error hit an ksm page.
30693070
*/
3070-
void collect_procs_ksm(struct folio *folio, struct page *page,
3071+
void collect_procs_ksm(const struct folio *folio, const struct page *page,
30713072
struct list_head *to_kill, int force_early)
30723073
{
30733074
struct ksm_stable_node *stable_node;

mm/memory-failure.c

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
445445
* Schedule a process for later kill.
446446
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
447447
*/
448-
static void __add_to_kill(struct task_struct *tsk, struct page *p,
448+
static void __add_to_kill(struct task_struct *tsk, const struct page *p,
449449
struct vm_area_struct *vma, struct list_head *to_kill,
450450
unsigned long addr)
451451
{
@@ -461,7 +461,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
461461
if (is_zone_device_page(p))
462462
tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
463463
else
464-
tk->size_shift = page_shift(compound_head(p));
464+
tk->size_shift = folio_shift(page_folio(p));
465465

466466
/*
467467
* Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -486,7 +486,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
486486
list_add_tail(&tk->nd, to_kill);
487487
}
488488

489-
static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
489+
static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
490490
struct vm_area_struct *vma, struct list_head *to_kill,
491491
unsigned long addr)
492492
{
@@ -509,7 +509,7 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
509509
return false;
510510
}
511511

512-
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
512+
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
513513
struct vm_area_struct *vma, struct list_head *to_kill,
514514
unsigned long addr)
515515
{
@@ -606,8 +606,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
606606
/*
607607
* Collect processes when the error hit an anonymous page.
608608
*/
609-
static void collect_procs_anon(struct folio *folio, struct page *page,
610-
struct list_head *to_kill, int force_early)
609+
static void collect_procs_anon(const struct folio *folio,
610+
const struct page *page, struct list_head *to_kill,
611+
int force_early)
611612
{
612613
struct task_struct *tsk;
613614
struct anon_vma *av;
@@ -643,8 +644,9 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
643644
/*
644645
* Collect processes when the error hit a file mapped page.
645646
*/
646-
static void collect_procs_file(struct folio *folio, struct page *page,
647-
struct list_head *to_kill, int force_early)
647+
static void collect_procs_file(const struct folio *folio,
648+
const struct page *page, struct list_head *to_kill,
649+
int force_early)
648650
{
649651
struct vm_area_struct *vma;
650652
struct task_struct *tsk;
@@ -680,7 +682,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
680682
}
681683

682684
#ifdef CONFIG_FS_DAX
683-
static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
685+
static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
684686
struct vm_area_struct *vma,
685687
struct list_head *to_kill, pgoff_t pgoff)
686688
{
@@ -691,7 +693,7 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
691693
/*
692694
* Collect processes when the error hit a fsdax page.
693695
*/
694-
static void collect_procs_fsdax(struct page *page,
696+
static void collect_procs_fsdax(const struct page *page,
695697
struct address_space *mapping, pgoff_t pgoff,
696698
struct list_head *to_kill, bool pre_remove)
697699
{
@@ -725,7 +727,7 @@ static void collect_procs_fsdax(struct page *page,
725727
/*
726728
* Collect the processes who have the corrupted page mapped to kill.
727729
*/
728-
static void collect_procs(struct folio *folio, struct page *page,
730+
static void collect_procs(const struct folio *folio, const struct page *page,
729731
struct list_head *tokill, int force_early)
730732
{
731733
if (!folio->mapping)

mm/page_vma_mapped.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -337,9 +337,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
337337
* outside the VMA or not present, returns -EFAULT.
338338
* Only valid for normal file or anonymous VMAs.
339339
*/
340-
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
340+
unsigned long page_mapped_in_vma(const struct page *page,
341+
struct vm_area_struct *vma)
341342
{
342-
struct folio *folio = page_folio(page);
343+
const struct folio *folio = page_folio(page);
343344
struct page_vma_mapped_walk pvmw = {
344345
.pfn = page_to_pfn(page),
345346
.nr_pages = 1,

mm/rmap.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,7 @@ void __init anon_vma_init(void)
496496
* concurrently without folio lock protection). See folio_lock_anon_vma_read()
497497
* which has already covered that, and comment above remap_pages().
498498
*/
499-
struct anon_vma *folio_get_anon_vma(struct folio *folio)
499+
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
500500
{
501501
struct anon_vma *anon_vma = NULL;
502502
unsigned long anon_mapping;
@@ -540,7 +540,7 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio)
540540
* reference like with folio_get_anon_vma() and then block on the mutex
541541
* on !rwc->try_lock case.
542542
*/
543-
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
543+
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
544544
struct rmap_walk_control *rwc)
545545
{
546546
struct anon_vma *anon_vma = NULL;
@@ -1271,8 +1271,9 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
12711271
* @vma: the vm area in which the mapping is added
12721272
* @address: the user virtual address mapped
12731273
*/
1274-
static void __page_check_anon_rmap(struct folio *folio, struct page *page,
1275-
struct vm_area_struct *vma, unsigned long address)
1274+
static void __page_check_anon_rmap(const struct folio *folio,
1275+
const struct page *page, struct vm_area_struct *vma,
1276+
unsigned long address)
12761277
{
12771278
/*
12781279
* The page's anon-rmap details (mapping and index) are guaranteed to
@@ -2569,7 +2570,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
25692570
anon_vma_free(root);
25702571
}
25712572

2572-
static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2573+
static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
25732574
struct rmap_walk_control *rwc)
25742575
{
25752576
struct anon_vma *anon_vma;

0 commit comments

Comments (0)