Skip to content

Commit cf7adae

Browse files
committed
8275320: NMT should perform buffer overrun checks

8275320: NMT should perform buffer overrun checks
8275301: Unify C-heap buffer overrun checks into NMT
Reviewed-by: simonis, zgu
1 parent 96e3607 commit cf7adae

File tree

11 files changed

+423
-49
lines changed

11 files changed

+423
-49
lines changed

src/hotspot/share/runtime/os.cpp

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -669,13 +669,14 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
669669

670670
// NMT support
671671
NMT_TrackingLevel level = MemTracker::tracking_level();
672-
size_t nmt_header_size = MemTracker::malloc_header_size(level);
672+
const size_t nmt_overhead =
673+
MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
673674

674675
#ifndef ASSERT
675-
const size_t alloc_size = size + nmt_header_size;
676+
const size_t alloc_size = size + nmt_overhead;
676677
#else
677-
const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
678-
if (size + nmt_header_size > alloc_size) { // Check for rollover.
678+
const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_overhead);
679+
if (size + nmt_overhead > alloc_size) { // Check for rollover.
679680
return NULL;
680681
}
681682
#endif
@@ -693,7 +694,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
693694
return NULL;
694695
}
695696
// Wrap memory with guard
696-
GuardedMemory guarded(ptr, size + nmt_header_size);
697+
GuardedMemory guarded(ptr, size + nmt_overhead);
697698
ptr = guarded.get_user_ptr();
698699

699700
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
@@ -741,8 +742,9 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
741742
// NMT support
742743
NMT_TrackingLevel level = MemTracker::tracking_level();
743744
void* membase = MemTracker::record_free(memblock, level);
744-
size_t nmt_header_size = MemTracker::malloc_header_size(level);
745-
void* ptr = ::realloc(membase, size + nmt_header_size);
745+
const size_t nmt_overhead =
746+
MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
747+
void* ptr = ::realloc(membase, size + nmt_overhead);
746748
return MemTracker::record_malloc(ptr, size, memflags, stack, level);
747749
#else
748750
if (memblock == NULL) {
@@ -761,7 +763,10 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
761763
if (ptr != NULL ) {
762764
GuardedMemory guarded(MemTracker::malloc_base(memblock));
763765
// Guard's user data contains NMT header
764-
size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
766+
NMT_TrackingLevel level = MemTracker::tracking_level();
767+
const size_t nmt_overhead =
768+
MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
769+
size_t memblock_size = guarded.get_user_size() - nmt_overhead;
765770
memcpy(ptr, memblock, MIN2(size, memblock_size));
766771
if (paranoid) {
767772
verify_memory(MemTracker::malloc_base(ptr));

src/hotspot/share/services/mallocSiteTable.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,6 @@ volatile int MallocSiteTable::_access_count = 0;
3939
// Tracking hashtable contention
4040
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
4141

42-
4342
/*
4443
* Initialize malloc site table.
4544
* Hashtable entry is malloc'd, so it can cause infinite recursion.
@@ -49,7 +48,6 @@ NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
4948
* time, it is in single-threaded mode from JVM perspective.
5049
*/
5150
bool MallocSiteTable::initialize() {
52-
assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
5351

5452
// Fake the call stack for hashtable entry allocation
5553
assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");

src/hotspot/share/services/mallocSiteTable.hpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,9 @@ class MallocSiteTable : AllStatic {
114114
table_size = (table_base_size * NMT_TrackingStackDepth - 1)
115115
};
116116

117+
// The table must not be wider than the maximum value the bucket_idx field
118+
// in the malloc header can hold.
119+
STATIC_ASSERT(table_size <= MAX_MALLOCSITE_TABLE_SIZE);
117120

118121
// This is a very special lock, that allows multiple shared accesses (sharedLock), but
119122
// once exclusive access (exclusiveLock) is requested, all shared accesses are

src/hotspot/share/services/mallocTracker.cpp

Lines changed: 107 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,13 @@
2323
*/
2424
#include "precompiled.hpp"
2525

26+
#include "runtime/os.hpp"
2627
#include "services/mallocSiteTable.hpp"
2728
#include "services/mallocTracker.hpp"
2829
#include "services/mallocTracker.inline.hpp"
2930
#include "services/memTracker.hpp"
31+
#include "utilities/debug.hpp"
32+
#include "utilities/ostream.hpp"
3033

3134
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
3235

@@ -103,15 +106,118 @@ void MallocMemorySummary::initialize() {
103106
::new ((void*)_snapshot)MallocMemorySnapshot();
104107
}
105108

106-
void MallocHeader::release() const {
109+
void MallocHeader::mark_block_as_dead() {
110+
_canary = _header_canary_dead_mark;
111+
NOT_LP64(_alt_canary = _header_alt_canary_dead_mark);
112+
set_footer(_footer_canary_dead_mark);
113+
}
114+
115+
void MallocHeader::release() {
107116
// Tracking already shutdown, no housekeeping is needed anymore
108117
if (MemTracker::tracking_level() <= NMT_minimal) return;
109118

119+
check_block_integrity();
120+
110121
MallocMemorySummary::record_free(size(), flags());
111122
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
112123
if (MemTracker::tracking_level() == NMT_detail) {
113124
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
114125
}
126+
127+
mark_block_as_dead();
128+
}
129+
130+
void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const {
131+
assert(bad_address >= (address)this, "sanity");
132+
133+
// This function prints block information, including hex dump, in case of a detected
134+
// corruption. The hex dump should show both block header and corruption site
135+
// (which may or may not be close together or identical). Plus some surrounding area.
136+
//
137+
// Note that we use os::print_hex_dump(), which is able to cope with unmapped
138+
// memory (it uses SafeFetch).
139+
140+
st->print_cr("NMT Block at " PTR_FORMAT ", corruption at: " PTR_FORMAT ": ",
141+
p2i(this), p2i(bad_address));
142+
static const size_t min_dump_length = 256;
143+
address from1 = align_down((address)this, sizeof(void*)) - (min_dump_length / 2);
144+
address to1 = from1 + min_dump_length;
145+
address from2 = align_down(bad_address, sizeof(void*)) - (min_dump_length / 2);
146+
address to2 = from2 + min_dump_length;
147+
if (from2 > to1) {
148+
// Dump gets too large, split up in two sections.
149+
os::print_hex_dump(st, from1, to1, 1);
150+
st->print_cr("...");
151+
os::print_hex_dump(st, from2, to2, 1);
152+
} else {
153+
// print one hex dump
154+
os::print_hex_dump(st, from1, to2, 1);
155+
}
156+
}
157+
158+
// Check block integrity. If block is broken, print out a report
159+
// to tty (optionally with hex dump surrounding the broken block),
160+
// then trigger a fatal error.
161+
void MallocHeader::check_block_integrity() const {
162+
163+
#define PREFIX "NMT corruption: "
164+
// Note: if you modify the error messages here, make sure you
165+
// adapt the associated gtests too.
166+
167+
// Weed out obviously wrong block addresses of NULL or very low
168+
// values. Note that we should not call this for ::free(NULL),
169+
// which should be handled by os::free() above us.
170+
if (((size_t)p2i(this)) < K) {
171+
fatal(PREFIX "Block at " PTR_FORMAT ": invalid block address", p2i(this));
172+
}
173+
174+
// From here on we assume the block pointer to be valid. We could
175+
// use SafeFetch but since this is a hot path we don't. If we are
176+
// wrong, we will crash when accessing the canary, which hopefully
177+
// generates distinct crash report.
178+
179+
// Weed out obviously unaligned addresses. NMT blocks, being the result of
180+
// malloc calls, should adhere to malloc() alignment. Malloc alignment is
181+
// specified by the standard by this requirement:
182+
// "malloc returns a pointer which is suitably aligned for any built-in type"
183+
// For us it means that it is *at least* 64-bit on all of our 32-bit and
184+
// 64-bit platforms since we have native 64-bit types. It very probably is
185+
// larger than that, since there exist scalar types larger than 64bit. Here,
186+
// we test the smallest alignment we know.
187+
// Should we ever start using std::max_align_t, this would be one place to
188+
// fix up.
189+
if (!is_aligned(this, sizeof(uint64_t))) {
190+
print_block_on_error(tty, (address)this);
191+
fatal(PREFIX "Block at " PTR_FORMAT ": block address is unaligned", p2i(this));
192+
}
193+
194+
// Check header canary
195+
if (_canary != _header_canary_life_mark) {
196+
print_block_on_error(tty, (address)this);
197+
fatal(PREFIX "Block at " PTR_FORMAT ": header canary broken.", p2i(this));
198+
}
199+
200+
#ifndef _LP64
201+
// On 32-bit we have a second canary, check that one too.
202+
if (_alt_canary != _header_alt_canary_life_mark) {
203+
print_block_on_error(tty, (address)this);
204+
fatal(PREFIX "Block at " PTR_FORMAT ": header alternate canary broken.", p2i(this));
205+
}
206+
#endif
207+
208+
// Does block size seems reasonable?
209+
if (_size >= max_reasonable_malloc_size) {
210+
print_block_on_error(tty, (address)this);
211+
fatal(PREFIX "Block at " PTR_FORMAT ": header looks invalid (weirdly large block size)", p2i(this));
212+
}
213+
214+
// Check footer canary
215+
if (get_footer() != _footer_canary_life_mark) {
216+
print_block_on_error(tty, footer_address());
217+
fatal(PREFIX "Block at " PTR_FORMAT ": footer canary broken at " PTR_FORMAT " (buffer overflow?)",
218+
p2i(this), p2i(footer_address()));
219+
}
220+
#undef PREFIX
115221
}
116222

117223
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,

src/hotspot/share/services/mallocTracker.hpp

Lines changed: 106 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -239,31 +239,99 @@ class MallocMemorySummary : AllStatic {
239239

240240
/*
241241
* Malloc tracking header.
242-
* To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
243-
* which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
242+
*
243+
* If NMT is active (state >= minimal), we need to track allocations. A simple and cheap way to
244+
* do this is by using malloc headers.
245+
*
246+
* The user allocation is preceded by a header and is immediately followed by a (possibly unaligned)
247+
* footer canary:
248+
*
249+
* +--------------+------------- .... ------------------+-----+
250+
* | header | user | can |
251+
* | | allocation | ary |
252+
* +--------------+------------- .... ------------------+-----+
253+
* 16 bytes user size 2 byte
254+
*
255+
* Alignment:
256+
*
257+
* The start of the user allocation needs to adhere to malloc alignment. We assume 128 bits
258+
* on both 64-bit/32-bit to be enough for that. So the malloc header is 16 bytes long on both
259+
* 32-bit and 64-bit.
260+
*
261+
* Layout on 64-bit:
262+
*
263+
* 0 1 2 3 4 5 6 7
264+
* +--------+--------+--------+--------+--------+--------+--------+--------+
265+
* | 64-bit size | ...
266+
* +--------+--------+--------+--------+--------+--------+--------+--------+
267+
*
268+
* 8 9 10 11 12 13 14 15 16 ++
269+
* +--------+--------+--------+--------+--------+--------+--------+--------+ ------------------------
270+
* ... | bucket idx | pos idx | flags | unused | canary | ... User payload ....
271+
* +--------+--------+--------+--------+--------+--------+--------+--------+ ------------------------
272+
*
273+
* Layout on 32-bit:
274+
*
275+
* 0 1 2 3 4 5 6 7
276+
* +--------+--------+--------+--------+--------+--------+--------+--------+
277+
* | alt. canary | 32-bit size | ...
278+
* +--------+--------+--------+--------+--------+--------+--------+--------+
279+
*
280+
* 8 9 10 11 12 13 14 15 16 ++
281+
* +--------+--------+--------+--------+--------+--------+--------+--------+ ------------------------
282+
* ... | bucket idx | pos idx | flags | unused | canary | ... User payload ....
283+
* +--------+--------+--------+--------+--------+--------+--------+--------+ ------------------------
284+
*
285+
* Notes:
286+
* - We have a canary in the two bytes directly preceding the user payload. That allows us to
287+
* catch negative buffer overflows.
288+
* - On 32-bit, due to the smaller size_t, we have some bits to spare. So we also have a second
289+
* canary at the very start of the malloc header (generously sized 32 bits).
290+
* - The footer canary consists of two bytes. Since the footer location may be unaligned to 16 bits,
291+
* the bytes are stored individually.
244292
*/
245293

246294
class MallocHeader {
247-
#ifdef _LP64
248-
size_t _size : 64;
249-
size_t _flags : 8;
250-
size_t _pos_idx : 16;
251-
size_t _bucket_idx: 40;
252-
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
253-
#define MAX_BUCKET_LENGTH right_n_bits(16)
254-
#else
255-
size_t _size : 32;
256-
size_t _flags : 8;
257-
size_t _pos_idx : 8;
258-
size_t _bucket_idx: 16;
259-
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16)
260-
#define MAX_BUCKET_LENGTH right_n_bits(8)
261-
#endif // _LP64
295+
296+
NOT_LP64(uint32_t _alt_canary);
297+
size_t _size;
298+
uint16_t _bucket_idx;
299+
uint16_t _pos_idx;
300+
uint8_t _flags;
301+
uint8_t _unused;
302+
uint16_t _canary;
303+
304+
#define MAX_MALLOCSITE_TABLE_SIZE (USHRT_MAX - 1)
305+
#define MAX_BUCKET_LENGTH (USHRT_MAX - 1)
306+
307+
static const uint16_t _header_canary_life_mark = 0xE99E;
308+
static const uint16_t _header_canary_dead_mark = 0xD99D;
309+
static const uint16_t _footer_canary_life_mark = 0xE88E;
310+
static const uint16_t _footer_canary_dead_mark = 0xD88D;
311+
NOT_LP64(static const uint32_t _header_alt_canary_life_mark = 0xE99EE99E;)
312+
NOT_LP64(static const uint32_t _header_alt_canary_dead_mark = 0xD88DD88D;)
313+
314+
// We discount sizes larger than these
315+
static const size_t max_reasonable_malloc_size = LP64_ONLY(256 * G) NOT_LP64(3500 * M);
316+
317+
// Check block integrity. If block is broken, print out a report
318+
// to tty (optionally with hex dump surrounding the broken block),
319+
// then trigger a fatal error.
320+
void check_block_integrity() const;
321+
void print_block_on_error(outputStream* st, address bad_address) const;
322+
void mark_block_as_dead();
323+
324+
static uint16_t build_footer(uint8_t b1, uint8_t b2) { return ((uint16_t)b1 << 8) | (uint16_t)b2; }
325+
326+
uint8_t* footer_address() const { return ((address)this) + sizeof(MallocHeader) + _size; }
327+
uint16_t get_footer() const { return build_footer(footer_address()[0], footer_address()[1]); }
328+
void set_footer(uint16_t v) { footer_address()[0] = v >> 8; footer_address()[1] = (uint8_t)v; }
262329

263330
public:
331+
264332
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
265-
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
266-
"Wrong header size");
333+
334+
assert(size < max_reasonable_malloc_size, "Too large allocation size?");
267335

268336
if (level == NMT_minimal) {
269337
return;
@@ -277,11 +345,18 @@ class MallocHeader {
277345
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
278346
assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
279347
assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
280-
_bucket_idx = bucket_idx;
281-
_pos_idx = pos_idx;
348+
_bucket_idx = (uint16_t)bucket_idx;
349+
_pos_idx = (uint16_t)pos_idx;
282350
}
283351
}
284352

353+
_unused = 0;
354+
_canary = _header_canary_life_mark;
355+
// On 32-bit we have some bits more, use them for a second canary
356+
// guarding the start of the header.
357+
NOT_LP64(_alt_canary = _header_alt_canary_life_mark;)
358+
set_footer(_footer_canary_life_mark); // set after initializing _size
359+
285360
MallocMemorySummary::record_malloc(size, flags);
286361
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
287362
}
@@ -290,8 +365,8 @@ class MallocHeader {
290365
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
291366
bool get_stack(NativeCallStack& stack) const;
292367

293-
// Cleanup tracking information before the memory is released.
294-
void release() const;
368+
// Cleanup tracking information and mark block as dead before the memory is released.
369+
void release();
295370

296371
private:
297372
inline void set_size(size_t size) {
@@ -301,6 +376,9 @@ class MallocHeader {
301376
size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
302377
};
303378

379+
// This needs to be true on both 64-bit and 32-bit platforms
380+
STATIC_ASSERT(sizeof(MallocHeader) == (sizeof(uint64_t) * 2));
381+
304382

305383
// Main class called from MemTracker to track malloc activities
306384
class MallocTracker : AllStatic {
@@ -315,6 +393,11 @@ class MallocTracker : AllStatic {
315393
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
316394
}
317395

396+
// malloc tracking footer size for specific tracking level
397+
static inline size_t malloc_footer_size(NMT_TrackingLevel level) {
398+
return (level == NMT_off) ? 0 : sizeof(uint16_t);
399+
}
400+
318401
// Parameter name convention:
319402
// memblock : the beginning address for user data
320403
// malloc_base: the beginning address that includes malloc tracking header
@@ -349,11 +432,6 @@ class MallocTracker : AllStatic {
349432
return header->flags();
350433
}
351434

352-
// Get header size
353-
static inline size_t get_header_size(void* memblock) {
354-
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
355-
}
356-
357435
static inline void record_new_arena(MEMFLAGS flags) {
358436
MallocMemorySummary::record_new_arena(flags);
359437
}

0 commit comments

Comments (0)