xen/arm: page: Avoid pointer overflow on cache clean & invalidate
On Arm32, after cleaning and invalidating the last dcache line of the top
domheap page, i.e. VA = 0xfffff000 (as a result of flushing the page to
RAM), we end up adding the value of a dcache line size to the pointer
once again, which results in a pointer arithmetic overflow (with a 64B line
size, the operation 0xffffffc0 + 0x40 overflows to 0x0). Such behavior is
undefined, and given the wide range of compiler versions we support, it is
difficult to determine what could happen in such a scenario.
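
To make the failure mode concrete, here is a minimal standalone sketch
(not Xen code) of the two loop shapes; flush_line() is a hypothetical
stand-in for the cache maintenance instruction:

#define DCACHE_LINE 64UL

static void flush_line(const void *va) { (void)va; } /* stand-in for a DC op */

/* Old shape: the loop's final "p += DCACHE_LINE" computes a pointer one
 * line past the range; at the top of a 32-bit VA space (0xffffffc0 +
 * 0x40) that computation overflows, which is undefined behavior. */
static void flush_range_old(const char *p, unsigned long size)
{
    for ( ; size >= DCACHE_LINE; p += DCACHE_LINE, size -= DCACHE_LINE )
        flush_line(p);
}

/* New shape: only the integer index is advanced; "p + idx" is evaluated
 * solely for lines that are actually flushed, so no out-of-range pointer
 * is ever formed. */
static void flush_range_new(const char *p, unsigned long size)
{
    unsigned long idx = 0;

    for ( ; size >= DCACHE_LINE; idx += DCACHE_LINE, size -= DCACHE_LINE )
        flush_line(p + idx);
}

The only difference is where the address is materialised: the old loop
advances the pointer itself one line past the end of the range, while the
new loop only ever forms p + idx for a line it actually flushes.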

Modify clean_and_invalidate_dcache_va_range() as well as
clean_dcache_va_range() and invalidate_dcache_va_range(), given the
similarity of their handling, to prevent the pointer arithmetic overflow.
Modify the loops to use an additional variable to store the index of the
next cacheline. Add an assert to reject a region that wraps around, which
is illegal and would end up in a page fault anyway (the 0-2MB region is
unmapped). Lastly, return early if the size passed is 0.
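
For instance, assuming 32-bit pointers: for p = 0xfffff000 and
size = 0x1000, p + size - 1 = 0xffffffff >= p, so the assertion holds;
for size = 0x2000 the sum wraps to 0x00000fff < p, and the assertion
catches the wrap-around.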

Note that on Arm64 we don't have this problem, given that the max VA
space we support is 48 bits.

This is XSA-447 / CVE-2023-46837.

Signed-off-by: Michal Orzel <michal.orzel@amd.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
orzelmichal authored and jbeulich committed Dec 12, 2023
1 parent 0fb4b6b commit 190b7f4
Showing 1 changed file with 28 additions and 7 deletions.

--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -162,6 +162,13 @@ static inline size_t read_dcache_line_bytes(void)
 static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
 
@@ -174,11 +181,11 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
     }
 
     for ( ; size >= dcache_line_bytes;
-          p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+          idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__invalidate_dcache_one(0) : : "r" (p + idx));
 
     if ( size > 0 )
-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
 
     dsb(sy);           /* So we know the flushes happen before continuing */
 
@@ -188,14 +195,21 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
 static inline int clean_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
     size += (uintptr_t)p & cacheline_mask;
     size = (size + cacheline_mask) & ~cacheline_mask;
     p = (void *)((uintptr_t)p & ~cacheline_mask);
     for ( ; size >= dcache_line_bytes;
-          p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__clean_dcache_one(0) : : "r" (p));
+          idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
     return 0;
@@ -205,14 +219,21 @@ static inline int clean_and_invalidate_dcache_va_range
     (const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
     size += (uintptr_t)p & cacheline_mask;
     size = (size + cacheline_mask) & ~cacheline_mask;
     p = (void *)((uintptr_t)p & ~cacheline_mask);
     for ( ; size >= dcache_line_bytes;
-          p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+          idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
     return 0;
