Skip to content

Commit 45d277f

Browse files
committed
8270308: Arena::Amalloc may return misaligned address on 32-bit
Reviewed-by: coleenp, kbarrett
1 parent fde1831 commit 45d277f

File tree

3 files changed

+115
-11
lines changed

3 files changed

+115
-11
lines changed

src/hotspot/share/memory/arena.cpp

Lines changed: 38 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,17 @@
3030
#include "runtime/task.hpp"
3131
#include "runtime/threadCritical.hpp"
3232
#include "services/memTracker.hpp"
33+
#include "utilities/align.hpp"
34+
#include "utilities/debug.hpp"
3335
#include "utilities/ostream.hpp"
3436

37+
// Pre-defined default chunk sizes must be arena-aligned, see Chunk::operator new()
38+
STATIC_ASSERT(is_aligned((int)Chunk::tiny_size, ARENA_AMALLOC_ALIGNMENT));
39+
STATIC_ASSERT(is_aligned((int)Chunk::init_size, ARENA_AMALLOC_ALIGNMENT));
40+
STATIC_ASSERT(is_aligned((int)Chunk::medium_size, ARENA_AMALLOC_ALIGNMENT));
41+
STATIC_ASSERT(is_aligned((int)Chunk::size, ARENA_AMALLOC_ALIGNMENT));
42+
STATIC_ASSERT(is_aligned((int)Chunk::non_pool_size, ARENA_AMALLOC_ALIGNMENT));
43+
3544
//--------------------------------------------------------------------------------------
3645
// ChunkPool implementation
3746

@@ -171,13 +180,30 @@ class ChunkPoolCleaner : public PeriodicTask {
171180
//--------------------------------------------------------------------------------------
172181
// Chunk implementation
173182

174-
void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
175-
// requested_size is equal to sizeof(Chunk) but in order for the arena
176-
// allocations to come out aligned as expected the size must be aligned
177-
// to expected arena alignment.
178-
// expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it.
179-
assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
180-
size_t bytes = ARENA_ALIGN(requested_size) + length;
183+
void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, size_t length) throw() {
184+
185+
// - requested_size = sizeof(Chunk)
186+
// - length = payload size
187+
// We must ensure that the boundaries of the payload (C and D) are aligned to 64-bit:
188+
//
189+
// +-----------+--+--------------------------------------------+
190+
// | |g | |
191+
// | Chunk |a | Payload |
192+
// | |p | |
193+
// +-----------+--+--------------------------------------------+
194+
// A B C D
195+
//
196+
// - The Chunk is allocated from C-heap, therefore its start address (A) should be
197+
// 64-bit aligned on all our platforms, including 32-bit.
198+
// - sizeof(Chunk) (B) may not be aligned to 64-bit, and we have to take that into
199+
// account when calculating the Payload bottom (C) (see Chunk::bottom())
200+
// - the payload size (length) must be aligned to 64-bit, which takes care of 64-bit
201+
// aligning (D)
202+
203+
assert(sizeofChunk == sizeof(Chunk), "weird request size");
204+
assert(is_aligned(length, ARENA_AMALLOC_ALIGNMENT), "chunk payload length misaligned: "
205+
SIZE_FORMAT ".", length);
206+
size_t bytes = ARENA_ALIGN(sizeofChunk) + length;
181207
switch (length) {
182208
case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
183209
case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
@@ -188,6 +214,8 @@ void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode,
188214
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
189215
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
190216
}
217+
// We rely on arena alignment <= malloc alignment.
218+
assert(is_aligned(p, ARENA_AMALLOC_ALIGNMENT), "Chunk start address misaligned.");
191219
return p;
192220
}
193221
}
@@ -239,8 +267,7 @@ void Chunk::start_chunk_pool_cleaner_task() {
239267
//------------------------------Arena------------------------------------------
240268

241269
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
242-
size_t round_size = (sizeof (char *)) - 1;
243-
init_size = (init_size+round_size) & ~round_size;
270+
init_size = ARENA_ALIGN(init_size);
244271
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
245272
_hwm = _chunk->bottom(); // Save the cached hwm, max
246273
_max = _chunk->top();
@@ -340,7 +367,8 @@ size_t Arena::used() const {
340367
// Grow a new Chunk
341368
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
342369
// Get minimal required size. Either real big, or even bigger for giant objs
343-
size_t len = MAX2(x, (size_t) Chunk::size);
370+
// (Note: all chunk sizes have to be 64-bit aligned)
371+
size_t len = MAX2(ARENA_ALIGN(x), (size_t) Chunk::size);
344372

345373
Chunk *k = _chunk; // Get filled-up chunk address
346374
_chunk = new (alloc_failmode, len) Chunk(len);

src/hotspot/share/memory/arena.hpp

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,11 +52,12 @@ class Chunk: CHeapObj<mtChunk> {
5252
enum {
5353
// default sizes; make them slightly smaller than 2**k to guard against
5454
// buddy-system style malloc implementations
55+
// Note: please keep these constants 64-bit aligned.
5556
#ifdef _LP64
5657
slack = 40, // [RGV] Not sure if this is right, but make it
5758
// a multiple of 8.
5859
#else
59-
slack = 20, // suspected sizeof(Chunk) + internal malloc headers
60+
slack = 24, // suspected sizeof(Chunk) + internal malloc headers
6061
#endif
6162

6263
tiny_size = 256 - slack, // Size of first chunk (tiny)
@@ -134,6 +135,11 @@ class Arena : public CHeapObj<mtNone> {
134135
// Allocate x bytes from this arena, with the result aligned to
// ARENA_AMALLOC_ALIGNMENT (64-bit). The request size is rounded up to that
// alignment; allocation is served from the current chunk's high-water mark,
// growing a new chunk on overflow (see internal_amalloc).
// On allocation failure, behavior follows alloc_failmode (default: VM exit).
void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
x = ARENA_ALIGN(x); // note for 32 bits this should align _hwm as well.
// Debug-only escape hatch: route every arena allocation through malloc so
// native heap checkers can see individual allocations.
debug_only(if (UseMallocOnly) return malloc(x);)
// Amalloc guarantees 64-bit alignment and we need to ensure that in case the preceding
// allocation was AmallocWords. Only needed on 32-bit - on 64-bit Amalloc and AmallocWords are
// identical.
// Chunk end must itself be 64-bit aligned, otherwise re-aligning _hwm could
// push it past _max (chunk payload sizes are asserted aligned in
// Chunk::operator new).
assert(is_aligned(_max, ARENA_AMALLOC_ALIGNMENT), "chunk end unaligned?");
NOT_LP64(_hwm = ARENA_ALIGN(_hwm));
return internal_amalloc(x, alloc_failmode);
}
139145

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
/*
2+
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
3+
* Copyright (c) 2021 SAP SE. All rights reserved.
4+
*
5+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6+
*
7+
* This code is free software; you can redistribute it and/or modify it
8+
* under the terms of the GNU General Public License version 2 only, as
9+
* published by the Free Software Foundation.
10+
*
11+
* This code is distributed in the hope that it will be useful, but WITHOUT
12+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14+
* version 2 for more details (a copy is included in the LICENSE file that
15+
* accompanied this code).
16+
*
17+
* You should have received a copy of the GNU General Public License version
18+
* 2 along with this work; if not, write to the Free Software Foundation,
19+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20+
*
21+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22+
* or visit www.oracle.com if you need additional information or have any
23+
* questions.
24+
*/
25+
26+
#include "precompiled.hpp"
27+
#include "memory/arena.hpp"
28+
#include "utilities/align.hpp"
29+
#include "utilities/globalDefinitions.hpp"
30+
#include "unittest.hpp"
31+
32+
#ifndef LP64
33+
// These tests below are about alignment issues when mixing Amalloc and AmallocWords.
34+
// Since on 64-bit these APIs offer the same alignment, they only matter for 32-bit.
35+
36+
TEST_VM(Arena, mixed_alignment_allocation) {
  // Interleaving AmallocWords and Amalloc in the same arena must still hand
  // out pointers with each API's promised alignment.
  Arena ar(mtTest);
  void* word_aligned_p = ar.AmallocWords(BytesPerWord);
  void* long_aligned_p = ar.Amalloc(BytesPerLong);
  ASSERT_TRUE(is_aligned(word_aligned_p, BytesPerWord));
  ASSERT_TRUE(is_aligned(long_aligned_p, ARENA_AMALLOC_ALIGNMENT));
}
45+
46+
TEST_VM(Arena, Arena_with_crooked_initial_size) {
  // An arena constructed with an odd, not-64-bit-aligned initial size must
  // still satisfy the alignment guarantees of both allocation APIs.
  Arena ar(mtTest, 4097);
  void* word_aligned_p = ar.AmallocWords(BytesPerWord);
  void* long_aligned_p = ar.Amalloc(BytesPerLong);
  ASSERT_TRUE(is_aligned(word_aligned_p, BytesPerWord));
  ASSERT_TRUE(is_aligned(long_aligned_p, ARENA_AMALLOC_ALIGNMENT));
}
55+
56+
TEST_VM(Arena, Arena_grows_large_unaligned) {
  // Growing the arena with a large, unaligned request must not break
  // subsequent allocations. A word-aligned allocation larger than the
  // standard chunk size forces Arena::grow() with a non-standard length
  // (only reachable on 32-bit); if grow() failed to align that length,
  // later allocations would trip the alignment asserts.
  Arena ar(mtTest, 100); // start with a small first chunk
  void* oversized_p = ar.AmallocWords(Chunk::size + BytesPerWord); // if Arena::grow() misaligns, this asserts
  // Follow up with a batch of small allocations for good measure.
  for (int i = 0; i < 100; i ++) {
    ar.Amalloc(1);
  }
}
69+
70+
#endif // LP64

0 commit comments

Comments
 (0)