
Commit da00455

MaxKellermann authored and akpm00 committed
mm: constify assert/test functions in mm.h
For improved const-correctness, we select certain assert and test
functions which invoke only each other, functions that are already
const-ified, or no further functions. It is therefore relatively
trivial to const-ify them, which provides a basis for more
const-ification further up the call stack.

Link: https://lkml.kernel.org/r/20250901205021.3573313-12-max.kellermann@ionos.com
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Zankel <chris@zankel.net>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <james.bottomley@HansenPartnership.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jocelyn Falempe <jfalempe@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Nysal Jan K.A" <nysal@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
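The pattern the commit relies on is ordinary const propagation: once a
leaf helper accepts a const pointer, every caller that only reads
through that pointer can accept const as well, and so on up the call
stack. A minimal standalone sketch of that chain (not kernel code; the
struct and flag values below are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values, not the kernel's definitions. */
#define VM_READ         0x1UL
#define VM_WRITE        0x2UL
#define VM_EXEC         0x4UL
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

struct vm_area_struct {
        unsigned long vm_flags;
};

/* Leaf predicate: reads the VMA, never modifies it. */
static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_ACCESS_FLAGS;
}

/* One level up: can itself take const because the leaf does. */
static bool vma_worth_reporting(const struct vm_area_struct *vma)
{
        return vma_is_accessible(vma);
}

int main(void)
{
        const struct vm_area_struct vma = { .vm_flags = VM_READ };

        printf("accessible: %d\n", vma_worth_reporting(&vma));
        return 0;
}

Had the leaf kept its non-const parameter, vma_worth_reporting() could
only call it by casting const away, which is exactly the kind of cast
this change makes unnecessary.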
1 parent f346a94 commit da00455

File tree

1 file changed: +20 −20 lines changed


include/linux/mm.h

Lines changed: 20 additions & 20 deletions
@@ -719,7 +719,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
 		mmap_read_unlock(vmf->vma->vm_mm);
 }
 
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
 {
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
 		vma_assert_locked(vmf->vma);
@@ -732,7 +732,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
 	mmap_read_unlock(vmf->vma->vm_mm);
 }
 
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
 {
 	mmap_assert_locked(vmf->vma->vm_mm);
 }
@@ -875,7 +875,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
 		vma->vm_end >= vma->vm_mm->start_stack;
 }
 
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 
@@ -889,7 +889,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
 	return false;
 }
 
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
 {
 	if (!current->mm)
 		return true;
@@ -900,7 +900,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
 	return false;
 }
 
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_ACCESS_FLAGS;
 }
@@ -911,7 +911,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
 		(VM_SHARED | VM_MAYWRITE);
 }
 
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
 {
 	return is_shared_maywrite(vma->vm_flags);
 }
@@ -1855,7 +1855,7 @@ static inline struct folio *pfn_folio(unsigned long pfn)
 }
 
 #ifdef CONFIG_MMU
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
 {
 	return pfn_pte(page_to_pfn(page), pgprot);
 }
@@ -1870,7 +1870,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
  *
  * Return: A page table entry suitable for mapping this folio.
  */
-static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
+static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
 {
 	return pfn_pte(folio_pfn(folio), pgprot);
 }
@@ -1886,7 +1886,7 @@ static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
  *
  * Return: A page table entry suitable for mapping this folio.
  */
-static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
+static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
 {
 	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
 }
@@ -1902,7 +1902,7 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
 *
 * Return: A page table entry suitable for mapping this folio.
 */
-static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
+static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
 {
 	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
 }
@@ -3520,7 +3520,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
 	return mtree_load(&mm->mm_mt, addr);
 }
 
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_GROWSDOWN)
 		return stack_guard_gap;
@@ -3532,7 +3532,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
 	return 0;
 }
 
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
 {
 	unsigned long gap = stack_guard_start_gap(vma);
 	unsigned long vm_start = vma->vm_start;
@@ -3543,7 +3543,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 	return vm_start;
 }
 
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;
 
@@ -3555,7 +3555,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 	return vm_end;
 }
 
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
@@ -3572,7 +3572,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
 	return vma;
 }
 
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
 				unsigned long start, unsigned long end)
 {
 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
@@ -3688,7 +3688,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
 					   unsigned int flags)
 {
 	/*
@@ -3818,7 +3818,7 @@ static inline bool debug_guardpage_enabled(void)
 	return static_branch_unlikely(&_debug_guardpage_enabled);
 }
 
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
 {
 	if (!debug_guardpage_enabled())
 		return false;
@@ -3849,7 +3849,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
 static inline bool set_page_guard(struct zone *zone, struct page *page,
 				  unsigned int order) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3931,7 +3931,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
 {
 	/* number of pfns from base where pfn_to_page() is valid */
 	if (altmap)
@@ -3945,7 +3945,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
 	altmap->alloc -= nr_pfns;
 }
 #else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
 {
 	return 0;
 }
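Why these particular helpers are "relatively trivial to const-ify":
each one passes its pointer only to functions that already accept const
(or to no functions at all), so the qualifier propagates without casts.
A self-contained sketch of that selection criterion (not kernel code;
page_to_pfn() below is a simplified stand-in for the real macro):

#include <stdio.h>

/* Toy stand-ins, not the kernel's real types or macros. */
struct page {
        unsigned long flags;
};

static struct page mem_map[16];         /* toy page array */

/* Already const-correct: computes an index, never writes. */
static unsigned long page_to_pfn(const struct page *page)
{
        return (unsigned long)(page - mem_map);
}

/* Hence trivially const-ifiable, like mk_pte() in the hunk above. */
static unsigned long mk_pte_val(const struct page *page, unsigned long prot)
{
        return (page_to_pfn(page) << 12) | prot;
}

int main(void)
{
        printf("pte value: 0x%lx\n", mk_pte_val(&mem_map[3], 0x3UL));
        return 0;
}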
