mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
commit 47ebd03 upstream.

As reported by Dipanjan Das, when KMSAN is used together with kernel fault
injection (or, generally, even without the latter), calls to kcalloc() or
__vmap_pages_range_noflush() may fail, leaving the metadata mappings for
the virtual mapping in an inconsistent state.  When these metadata
mappings are accessed later, the kernel crashes.

To address the problem, we return a non-zero error code from
kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping
failure inside it, and make vmap_pages_range_noflush() return an error if
KMSAN fails to allocate the metadata.

This patch also removes KMSAN_WARN_ON() from vmap_pages_range_noflush(),
as these allocation failures are not fatal anymore.

Link: https://lkml.kernel.org/r/20230413131223.4135168-1-glider@google.com
Fixes: b073d7f ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
  Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Reviewed-by: Marco Elver <elver@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
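
Taken together, the contract after this patch is that both kmsan_vmap_pages_range_noflush() and vmap_pages_range_noflush() return 0 on success or a negative errno. A minimal kernel-side sketch of a caller honoring that contract follows; example_map_pages() is a hypothetical name used for illustration only, roughly mirroring what the real callers in mm/vmalloc.c do:

/*
 * Hypothetical caller, for illustration (not part of the patch): a
 * failure to set up KMSAN metadata now surfaces as a negative errno
 * before the mapping is ever used.
 */
static int example_map_pages(unsigned long addr, unsigned long end,
                             pgprot_t prot, struct page **pages,
                             unsigned int page_shift)
{
        int err;

        err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
        if (err)
                return err;     /* e.g. -ENOMEM from the KMSAN hook */
        flush_cache_vmap(addr, end);
        return 0;
}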
ramosian-glider authored and gregkh committed Apr 26, 2023
1 parent 433a7ec commit bd6f342
Showing 3 changed files with 34 additions and 19 deletions.
20 changes: 11 additions & 9 deletions include/linux/kmsan.h

@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift: page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                    pgprot_t prot, struct page **pages,
-                                    unsigned int page_shift);
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                   pgprot_t prot, struct page **pages,
+                                   unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -282,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
-                                                  unsigned long end,
-                                                  pgprot_t prot,
-                                                  struct page **pages,
-                                                  unsigned int page_shift)
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
+                                                 unsigned long end,
+                                                 pgprot_t prot,
+                                                 struct page **pages,
+                                                 unsigned int page_shift)
 {
+        return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
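
Note that the !CONFIG_KMSAN stub now returns 0 unconditionally, so the call site in vmap_pages_range_noflush() needs no #ifdef: with KMSAN compiled out, the inline stub always reports success and the compiler folds the error check away.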
27 changes: 18 additions & 9 deletions mm/kmsan/shadow.c

@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
         kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                    pgprot_t prot, struct page **pages,
-                                    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                   pgprot_t prot, struct page **pages,
+                                   unsigned int page_shift)
 {
         unsigned long shadow_start, origin_start, shadow_end, origin_end;
         struct page **s_pages, **o_pages;
-        int nr, mapped;
+        int nr, mapped, err = 0;
 
         if (!kmsan_enabled)
-                return;
+                return 0;
 
         shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
         shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
         if (!shadow_start)
-                return;
+                return 0;
 
         nr = (end - start) / PAGE_SIZE;
         s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
         o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-        if (!s_pages || !o_pages)
+        if (!s_pages || !o_pages) {
+                err = -ENOMEM;
                 goto ret;
+        }
         for (int i = 0; i < nr; i++) {
                 s_pages[i] = shadow_page_for(pages[i]);
                 o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
         kmsan_enter_runtime();
         mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
                                             s_pages, page_shift);
-        KMSAN_WARN_ON(mapped);
+        if (mapped) {
+                err = mapped;
+                goto ret;
+        }
         mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
                                             o_pages, page_shift);
-        KMSAN_WARN_ON(mapped);
+        if (mapped) {
+                err = mapped;
+                goto ret;
+        }
         kmsan_leave_runtime();
         flush_tlb_kernel_range(shadow_start, shadow_end);
         flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
         kfree(s_pages);
         kfree(o_pages);
+        return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
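
Both the success and failure paths of kmsan_vmap_pages_range_noflush() now converge at the ret: label, which frees the temporary s_pages/o_pages arrays and returns err: 0 on success, -ENOMEM if either kcalloc() failed, or whatever error __vmap_pages_range_noflush() reported for the shadow or origin range.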
6 changes: 5 additions & 1 deletion mm/vmalloc.c

@@ -613,7 +613,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                 pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-        kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+        int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+                                                 page_shift);
+
+        if (ret)
+                return ret;
         return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
 
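With all three pieces in place, a metadata allocation or mapping failure propagates out of vmap_pages_range_noflush() before the actual mapping is established, so callers see an ordinary error return instead of crashing later on an inconsistent shadow mapping.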
