Skip to content

Commit 91d0ec8

Browse files
Pintu Kumar and akpm00
authored and committed
zsmalloc: replace kmap_atomic with kmap_local_page
The use of kmap_atomic/kunmap_atomic is deprecated. Replace it will kmap_local_page/kunmap_local all over the place. Also fix SPDX missing license header. WARNING: Missing or malformed SPDX-License-Identifier tag in line 1 WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead + vaddr = kmap_atomic(page); Link: https://lkml.kernel.org/r/20241001175358.12970-1-quic_pintu@quicinc.com Signed-off-by: Pintu Kumar <quic_pintu@quicinc.com> Cc: Joe Perches <joe@perches.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Pintu Agarwal <pintu.ping@gmail.com> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Shuah Khan <skhan@linuxfoundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent b7fc16a commit 91d0ec8

File tree

1 file changed

+34
-32
lines changed

1 file changed

+34
-32
lines changed

mm/zsmalloc.c

Lines changed: 34 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -1,3 +1,5 @@
1+
// SPDX-License-Identifier: GPL-2.0-or-later
2+
13
/*
24
* zsmalloc memory allocator
35
*
@@ -898,7 +900,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
898900

899901
set_first_obj_offset(page, off);
900902

901-
vaddr = kmap_atomic(page);
903+
vaddr = kmap_local_page(page);
902904
link = (struct link_free *)vaddr + off / sizeof(*link);
903905

904906
while ((off += class->size) < PAGE_SIZE) {
@@ -921,7 +923,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
921923
*/
922924
link->next = -1UL << OBJ_TAG_BITS;
923925
}
924-
kunmap_atomic(vaddr);
926+
kunmap_local(vaddr);
925927
page = next_page;
926928
off %= PAGE_SIZE;
927929
}
@@ -1059,12 +1061,12 @@ static void *__zs_map_object(struct mapping_area *area,
10591061
sizes[1] = size - sizes[0];
10601062

10611063
/* copy object to per-cpu buffer */
1062-
addr = kmap_atomic(pages[0]);
1064+
addr = kmap_local_page(pages[0]);
10631065
memcpy(buf, addr + off, sizes[0]);
1064-
kunmap_atomic(addr);
1065-
addr = kmap_atomic(pages[1]);
1066+
kunmap_local(addr);
1067+
addr = kmap_local_page(pages[1]);
10661068
memcpy(buf + sizes[0], addr, sizes[1]);
1067-
kunmap_atomic(addr);
1069+
kunmap_local(addr);
10681070
out:
10691071
return area->vm_buf;
10701072
}
@@ -1089,12 +1091,12 @@ static void __zs_unmap_object(struct mapping_area *area,
10891091
sizes[1] = size - sizes[0];
10901092

10911093
/* copy per-cpu buffer to object */
1092-
addr = kmap_atomic(pages[0]);
1094+
addr = kmap_local_page(pages[0]);
10931095
memcpy(addr + off, buf, sizes[0]);
1094-
kunmap_atomic(addr);
1095-
addr = kmap_atomic(pages[1]);
1096+
kunmap_local(addr);
1097+
addr = kmap_local_page(pages[1]);
10961098
memcpy(addr, buf + sizes[0], sizes[1]);
1097-
kunmap_atomic(addr);
1099+
kunmap_local(addr);
10981100

10991101
out:
11001102
/* enable page faults to match kunmap_atomic() return conditions */
@@ -1223,7 +1225,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
12231225
area->vm_mm = mm;
12241226
if (off + class->size <= PAGE_SIZE) {
12251227
/* this object is contained entirely within a page */
1226-
area->vm_addr = kmap_atomic(page);
1228+
area->vm_addr = kmap_local_page(page);
12271229
ret = area->vm_addr + off;
12281230
goto out;
12291231
}
@@ -1260,7 +1262,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
12601262

12611263
area = this_cpu_ptr(&zs_map_area);
12621264
if (off + class->size <= PAGE_SIZE)
1263-
kunmap_atomic(area->vm_addr);
1265+
kunmap_local(area->vm_addr);
12641266
else {
12651267
struct page *pages[2];
12661268

@@ -1318,7 +1320,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
13181320
for (i = 0; i < nr_page; i++)
13191321
m_page = get_next_page(m_page);
13201322

1321-
vaddr = kmap_atomic(m_page);
1323+
vaddr = kmap_local_page(m_page);
13221324
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
13231325
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
13241326
if (likely(!ZsHugePage(zspage)))
@@ -1328,7 +1330,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
13281330
/* record handle to page->index */
13291331
zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
13301332

1331-
kunmap_atomic(vaddr);
1333+
kunmap_local(vaddr);
13321334
mod_zspage_inuse(zspage, 1);
13331335

13341336
obj = location_to_obj(m_page, obj);
@@ -1419,7 +1421,7 @@ static void obj_free(int class_size, unsigned long obj)
14191421
f_offset = offset_in_page(class_size * f_objidx);
14201422
zspage = get_zspage(f_page);
14211423

1422-
vaddr = kmap_atomic(f_page);
1424+
vaddr = kmap_local_page(f_page);
14231425
link = (struct link_free *)(vaddr + f_offset);
14241426

14251427
/* Insert this object in containing zspage's freelist */
@@ -1429,7 +1431,7 @@ static void obj_free(int class_size, unsigned long obj)
14291431
f_page->index = 0;
14301432
set_freeobj(zspage, f_objidx);
14311433

1432-
kunmap_atomic(vaddr);
1434+
kunmap_local(vaddr);
14331435
mod_zspage_inuse(zspage, -1);
14341436
}
14351437

@@ -1492,8 +1494,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
14921494
if (d_off + class->size > PAGE_SIZE)
14931495
d_size = PAGE_SIZE - d_off;
14941496

1495-
s_addr = kmap_atomic(s_page);
1496-
d_addr = kmap_atomic(d_page);
1497+
s_addr = kmap_local_page(s_page);
1498+
d_addr = kmap_local_page(d_page);
14971499

14981500
while (1) {
14991501
size = min(s_size, d_size);
@@ -1516,26 +1518,26 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
15161518
* Documentation/mm/highmem.rst.
15171519
*/
15181520
if (s_off >= PAGE_SIZE) {
1519-
kunmap_atomic(d_addr);
1520-
kunmap_atomic(s_addr);
1521+
kunmap_local(d_addr);
1522+
kunmap_local(s_addr);
15211523
s_page = get_next_page(s_page);
1522-
s_addr = kmap_atomic(s_page);
1523-
d_addr = kmap_atomic(d_page);
1524+
s_addr = kmap_local_page(s_page);
1525+
d_addr = kmap_local_page(d_page);
15241526
s_size = class->size - written;
15251527
s_off = 0;
15261528
}
15271529

15281530
if (d_off >= PAGE_SIZE) {
1529-
kunmap_atomic(d_addr);
1531+
kunmap_local(d_addr);
15301532
d_page = get_next_page(d_page);
1531-
d_addr = kmap_atomic(d_page);
1533+
d_addr = kmap_local_page(d_page);
15321534
d_size = class->size - written;
15331535
d_off = 0;
15341536
}
15351537
}
15361538

1537-
kunmap_atomic(d_addr);
1538-
kunmap_atomic(s_addr);
1539+
kunmap_local(d_addr);
1540+
kunmap_local(s_addr);
15391541
}
15401542

15411543
/*
@@ -1548,7 +1550,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
15481550
unsigned int offset;
15491551
int index = *obj_idx;
15501552
unsigned long handle = 0;
1551-
void *addr = kmap_atomic(page);
1553+
void *addr = kmap_local_page(page);
15521554

15531555
offset = get_first_obj_offset(page);
15541556
offset += class->size * index;
@@ -1561,7 +1563,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
15611563
index++;
15621564
}
15631565

1564-
kunmap_atomic(addr);
1566+
kunmap_local(addr);
15651567

15661568
*obj_idx = index;
15671569

@@ -1798,14 +1800,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
17981800
migrate_write_lock(zspage);
17991801

18001802
offset = get_first_obj_offset(page);
1801-
s_addr = kmap_atomic(page);
1803+
s_addr = kmap_local_page(page);
18021804

18031805
/*
18041806
* Here, any user cannot access all objects in the zspage so let's move.
18051807
*/
1806-
d_addr = kmap_atomic(newpage);
1808+
d_addr = kmap_local_page(newpage);
18071809
copy_page(d_addr, s_addr);
1808-
kunmap_atomic(d_addr);
1810+
kunmap_local(d_addr);
18091811

18101812
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
18111813
addr += class->size) {
@@ -1818,7 +1820,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
18181820
record_obj(handle, new_obj);
18191821
}
18201822
}
1821-
kunmap_atomic(s_addr);
1823+
kunmap_local(s_addr);
18221824

18231825
replace_sub_page(class, zspage, newpage, page);
18241826
/*

0 commit comments

Comments
 (0)