
Commit a847b17

MaxKellermann authored and akpm00 committed
mm: constify highmem related functions for improved const-correctness
Lots of functions in mm/highmem.c do not write to the given pointers and do
not call functions that take non-const pointers and can therefore be
constified.

This includes functions like kunmap() which might be implemented in a way
that writes to the pointer (e.g. to update reference counters or mapping
fields), but currently are not.

kmap() on the other hand cannot be made const because it calls
set_page_address() which is non-const in some architectures/configurations.

[akpm@linux-foundation.org: "fix" folio_page() build failure]
Link: https://lkml.kernel.org/r/20250901205021.3573313-13-max.kellermann@ionos.com
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Borislav Betkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Zankel <chris@zankel.net>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <james.bottomley@HansenPartnership.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jocelyn Falempe <jfalempe@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Nysal Jan K.A" <nysal@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russel King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleinxer <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent da00455 commit a847b17
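
The practical effect of the constification is that read-only consumers of page
contents can take a const struct page * and still use the local-mapping helpers
without casts. A minimal caller sketch under that assumption; checksum_page()
below is a hypothetical helper written for illustration, not part of this
commit:

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical read-only helper, for illustration only: with the
 * constified kmap_local_page(), a caller that merely reads page
 * contents can accept a const page pointer without casting it away.
 */
static u32 checksum_page(const struct page *page)
{
	const u8 *vaddr = kmap_local_page(page);	/* now accepts a const page */
	u32 sum = 0;
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += vaddr[i];

	kunmap_local(vaddr);	/* already takes a const void * */
	return sum;
}

Before this series, such a function would have needed either a non-const
parameter or an explicit cast purely to satisfy kmap_local_page()'s signature.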

File tree

6 files changed: +33 −33 lines changed


arch/arm/include/asm/highmem.h

Lines changed: 3 additions & 3 deletions
@@ -46,9 +46,9 @@ extern pte_t *pkmap_page_table;
 #endif
 
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
-extern void *kmap_high_get(struct page *page);
+extern void *kmap_high_get(const struct page *page);
 
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
 {
 	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
 		return NULL;
@@ -57,7 +57,7 @@ static inline void *arch_kmap_local_high_get(struct page *page)
 #define arch_kmap_local_high_get arch_kmap_local_high_get
 
 #else /* ARCH_NEEDS_KMAP_HIGH_GET */
-static inline void *kmap_high_get(struct page *page)
+static inline void *kmap_high_get(const struct page *page)
 {
 	return NULL;
 }

arch/xtensa/include/asm/highmem.h

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@
 
 #if DCACHE_WAY_SIZE > PAGE_SIZE
 #define get_pkmap_color get_pkmap_color
-static inline int get_pkmap_color(struct page *page)
+static inline int get_pkmap_color(const struct page *page)
 {
 	return DCACHE_ALIAS(page_to_phys(page));
 }

include/linux/highmem-internal.h

Lines changed: 18 additions & 18 deletions
@@ -7,7 +7,7 @@
  */
 #ifdef CONFIG_KMAP_LOCAL
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
 void kunmap_local_indexed(const void *vaddr);
 void kmap_local_fork(struct task_struct *tsk);
 void __kmap_local_sched_out(void);
@@ -33,7 +33,7 @@ static inline void kmap_flush_tlb(unsigned long addr) { }
 #endif
 
 void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
 void __kmap_flush_unused(void);
 struct page *__kmap_to_page(void *addr);
 
@@ -50,7 +50,7 @@ static inline void *kmap(struct page *page)
 	return addr;
 }
 
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
 {
 	might_sleep();
 	if (!PageHighMem(page))
@@ -68,26 +68,26 @@ static inline void kmap_flush_unused(void)
 	__kmap_flush_unused();
 }
 
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
 {
 	return __kmap_local_page_prot(page, kmap_prot);
 }
 
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
 {
 	if (!PageHighMem(page))
 		return page_address(page);
 	/* If the page is in HighMem, it's not safe to kmap it.*/
 	return NULL;
 }
 
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
 {
-	struct page *page = folio_page(folio, offset / PAGE_SIZE);
+	const struct page *page = folio_page(folio, offset / PAGE_SIZE);
 	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
 }
 
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	return __kmap_local_page_prot(page, prot);
 }
@@ -102,7 +102,7 @@ static inline void __kunmap_local(const void *vaddr)
 	kunmap_local_indexed(vaddr);
 }
 
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		migrate_disable();
@@ -113,7 +113,7 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 	return __kmap_local_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
@@ -173,32 +173,32 @@ static inline void *kmap(struct page *page)
 	return page_address(page);
 }
 
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
 static inline void kmap_flush_unused(void) { }
 
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
 {
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(page_address(page));
 #endif
 }
 
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
 {
 	return page_address(page);
 }
 
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
 {
 	return page_address(page);
 }
 
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
 {
 	return folio_address(folio) + offset;
 }
 
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	return kmap_local_page(page);
 }
@@ -215,7 +215,7 @@ static inline void __kunmap_local(const void *addr)
 #endif
 }
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		migrate_disable();
@@ -225,7 +225,7 @@ static inline void *kmap_atomic(struct page *page)
 	return page_address(page);
 }
 
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
 {
 	return kmap_atomic(page);
 }

include/linux/highmem.h

Lines changed: 4 additions & 4 deletions
@@ -43,7 +43,7 @@ static inline void *kmap(struct page *page);
  * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
  * pages in the low memory area.
  */
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
 
 /**
  * kmap_to_page - Get the page for a kmap'ed address
@@ -93,7 +93,7 @@ static inline void kmap_flush_unused(void);
  * disabling migration in order to keep the virtual address stable across
  * preemption. No caller of kmap_local_page() can rely on this side effect.
  */
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
 
 /**
  * kmap_local_folio - Map a page in this folio for temporary usage
@@ -129,7 +129,7 @@ static inline void *kmap_local_page(struct page *page);
  * Context: Can be invoked from any context.
  * Return: The virtual address of @offset.
  */
-static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
 
 /**
  * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
@@ -176,7 +176,7 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
  * kunmap_atomic(vaddr2);
  * kunmap_atomic(vaddr1);
  */
-static inline void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic(const struct page *page);
 
 /* Highmem related interfaces for management code */
 static inline unsigned long nr_free_highpages(void);

include/linux/page-flags.h

Lines changed: 2 additions & 2 deletions
@@ -316,9 +316,9 @@ static __always_inline unsigned long _compound_head(const struct page *page)
  * check that the page number lies within @folio; the caller is presumed
  * to have a reference to the page.
  */
-static inline struct page *folio_page(struct folio *folio, unsigned long n)
+static inline struct page *folio_page(const struct folio *folio, unsigned long n)
 {
-	return &folio->page + n;
+	return (struct page *)(&folio->page + n);
 }
 
 static __always_inline int PageTail(const struct page *page)
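
This hunk is the folio_page() adjustment referenced by the akpm note in the
commit message: the accessor now accepts a const folio but keeps returning a
non-const struct page *, so the constness is dropped with one explicit cast
inside the helper rather than at every call site. A simplified stand-alone
sketch of the same pattern; the struct layouts below are stand-ins for
illustration, not the kernel definitions:

/* Stand-in types, for illustration only; the real definitions live in
 * include/linux/mm_types.h and carry many more fields.
 */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Callers holding only a const folio can still obtain the n-th page;
 * the cast confines the const removal to a single audited place.
 */
static inline struct page *folio_page(const struct folio *folio, unsigned long n)
{
	return (struct page *)(&folio->page + n);
}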

mm/highmem.c

Lines changed: 5 additions & 5 deletions
@@ -61,7 +61,7 @@ static inline int kmap_local_calc_idx(int idx)
 /*
  * Determine color of virtual address where the page should be mapped.
  */
-static inline unsigned int get_pkmap_color(struct page *page)
+static inline unsigned int get_pkmap_color(const struct page *page)
 {
 	return 0;
 }
@@ -334,7 +334,7 @@ EXPORT_SYMBOL(kmap_high);
  *
  * This can be called from any context.
  */
-void *kmap_high_get(struct page *page)
+void *kmap_high_get(const struct page *page)
 {
 	unsigned long vaddr, flags;
 
@@ -356,7 +356,7 @@ void *kmap_high_get(struct page *page)
  * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
  * only from user context.
  */
-void kunmap_high(struct page *page)
+void kunmap_high(const struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;
@@ -508,7 +508,7 @@ static inline void kmap_local_idx_pop(void)
 #endif
 
 #ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
 {
 	return NULL;
 }
@@ -572,7 +572,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 }
 EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
 
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	void *kmap;
 