kasan, mm, arm64: tag non slab memory allocated via pagealloc
Tag-based KASAN doesn't check memory accesses through pointers tagged with
0xff.  When page_address() is used to get a pointer to the memory that
corresponds to some page, the tag of the resulting pointer gets set to
0xff, even though the allocated memory might have been tagged differently.
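
Not part of the patch: a minimal sketch in plain C of how a top-byte tag
works (TAG_SHIFT and tag_set() are illustrative names mirroring the spirit
of arm64's __tag_set(); 0xff is the "match-all" tag that tag-based KASAN
never checks):

	#include <stdint.h>

	#define TAG_SHIFT 56	/* the tag lives in bits 63:56 of the address */

	static inline uint64_t tag_set(uint64_t addr, uint8_t tag)
	{
		/* Clear the old top byte, then install the new tag. */
		return (addr & ~(0xffULL << TAG_SHIFT)) |
		       ((uint64_t)tag << TAG_SHIFT);
	}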

For slab pages it's impossible to recover the correct tag to return from
page_address(), since the page might contain multiple slab objects tagged
with different values, and we can't know in advance which one of them is
going to get accessed.  For non-slab pages, however, we can recover the
tag in page_address(), since the whole page was marked with the same tag.
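
As a hypothetical illustration (the tag values are invented), two objects
allocated from the same slab page can carry different pointer tags, so no
single per-page tag could reconstruct both:

	void *a = kmalloc(32, GFP_KERNEL);	/* tagged pointer, e.g. tag 0x0c */
	void *b = kmalloc(32, GFP_KERNEL);	/* possibly same page, e.g. tag 0x5e */
	/* page_address() on that page can't know whether the next access
	 * goes through a's tag or b's tag. */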

This patch adds tagging to non-slab memory allocated with pagealloc.  To
set the tag of the pointer returned from page_address(), the tag gets
stored in page->flags when the memory gets allocated.
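
A hedged sketch of the resulting round trip (the order and tag value are
made up; alloc_pages() and page_address() are the interfaces this patch
touches):

	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* non-slab, 4 pages */
	/* kasan_alloc_pages() stores one random tag, say 0xab, in
	 * page->flags of all four pages ... */
	void *ptr = page_address(page);
	/* ... so ptr's top byte now equals page_kasan_tag(page)
	 * rather than the blanket 0xff. */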

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
xairy authored and torvalds committed Dec 28, 2018
1 parent 41eea9c commit 2813b9c
Showing 7 changed files with 72 additions and 4 deletions.
8 changes: 7 additions & 1 deletion arch/arm64/include/asm/memory.h
@@ -321,7 +321,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);		\
+	__addr = __tag_set(__addr, page_kasan_tag(page));	\
+	((void *)__addr);					\
+})

#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
29 changes: 29 additions & 0 deletions include/linux/mm.h
@@ -804,6 +804,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
* Define the bit shifts to access each section. For non-existent
@@ -814,6 +815,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -836,6 +838,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
@@ -1101,6 +1104,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
}
#endif /* CONFIG_NUMA_BALANCING */

+#ifdef CONFIG_KASAN_SW_TAGS
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+	page_kasan_tag_set(page, 0xff);
+}
+#else
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+#endif
+
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
10 changes: 10 additions & 0 deletions include/linux/page-flags-layout.h
@@ -82,6 +82,16 @@
#define LAST_CPUPID_WIDTH 0
#endif

+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+	> BITS_PER_LONG - NR_PAGEFLAGS
+#error "KASAN: not enough bits in page flags for tag"
+#endif
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
/*
* We are going to use the flags for the page to node mapping if its in
* there. This includes the case where there is no node, so it is implicit.
11 changes: 11 additions & 0 deletions mm/cma.c
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	size_t i;
struct page *page = NULL;
int ret = -ENOMEM;

@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,

trace_cma_alloc(pfn, page, count, align);

+	/*
+	 * CMA can allocate multiple page blocks, which results in different
+	 * blocks being marked with different tags. Reset the tags to ignore
+	 * those page blocks.
+	 */
+	if (page) {
+		for (i = 0; i < count; i++)
+			page_kasan_tag_reset(page + i);
+	}
+
if (ret && !no_warn) {
pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, count, ret);
15 changes: 13 additions & 2 deletions mm/kasan/common.c
@@ -220,8 +220,15 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)

void kasan_alloc_pages(struct page *page, unsigned int order)
{
+	u8 tag;
+	unsigned long i;
+
	if (unlikely(PageHighMem(page)))
		return;
+
+	tag = random_tag();
+	for (i = 0; i < (1 << order); i++)
+		page_kasan_tag_set(page + i, tag);
kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

@@ -319,6 +326,10 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,

void kasan_poison_slab(struct page *page)
{
+	unsigned long i;
+
+	for (i = 0; i < (1 << compound_order(page)); i++)
+		page_kasan_tag_reset(page + i);
kasan_poison_shadow(page_address(page),
PAGE_SIZE << compound_order(page),
KASAN_KMALLOC_REDZONE);
@@ -517,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
page = virt_to_head_page(ptr);

if (unlikely(!PageSlab(page))) {
-		if (reset_tag(ptr) != page_address(page)) {
+		if (ptr != page_address(page)) {
kasan_report_invalid_free(ptr, ip);
return;
}
@@ -530,7 +541,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)

void kasan_kfree_large(void *ptr, unsigned long ip)
{
-	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
+	if (ptr != page_address(virt_to_head_page(ptr)))
kasan_report_invalid_free(ptr, ip);
/* The object will be poisoned by page_alloc. */
}
1 change: 1 addition & 0 deletions mm/page_alloc.c
@@ -1183,6 +1183,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
init_page_count(page);
page_mapcount_reset(page);
page_cpupid_reset_last(page);
+	page_kasan_tag_reset(page);

INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
2 changes: 1 addition & 1 deletion mm/slab.c
@@ -2357,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
void *freelist;
void *addr = page_address(page);

-	page->s_mem = addr + colour_off;
+	page->s_mem = kasan_reset_tag(addr) + colour_off;
page->active = 0;

if (OBJFREELIST_SLAB(cachep))
