@@ -297,37 +297,18 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
   return map_memory_to_file(base, size, fd);
 }
 
-// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
-// so on posix, unmap the section at the start and at the end of the chunk that we mapped
-// rather than unmapping and remapping the whole chunk to get requested alignment.
-char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
+static size_t calculate_aligned_extra_size(size_t size, size_t alignment) {
   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
          "Alignment must be a multiple of allocation granularity (page size)");
   assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
 
   size_t extra_size = size + alignment;
   assert(extra_size >= size, "overflow, size is too large to allow alignment");
+  return extra_size;
+}
 
-  char* extra_base;
-  if (file_desc != -1) {
-    // For file mapping, we do not call os:reserve_memory_with_fd since:
-    //   - we later chop away parts of the mapping using os::release_memory and that could fail if the
-    //     original mmap call had been tied to an fd.
-    //   - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
-    //     mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
-    //     chopping off and unmapping excess bits back and front (see below) would not work.
-    extra_base = reserve_mmapped_memory(extra_size, NULL);
-    if (extra_base != NULL) {
-      MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
-    }
-  } else {
-    extra_base = os::reserve_memory(extra_size);
-  }
-
-  if (extra_base == NULL) {
-    return NULL;
-  }
-
+// After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment.
+static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) {
   // Do manual alignment
   char* aligned_base = align_up(extra_base, alignment);
 
@@ -349,13 +330,39 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
     os::release_memory(extra_base + begin_offset + size, end_offset);
   }
 
-  if (file_desc != -1) {
-    // After we have an aligned address, we can replace anonymous mapping with file mapping
-    if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
-      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
-    }
-    MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
+  return aligned_base;
+}
+
+// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
+// so on posix, unmap the section at the start and at the end of the chunk that we mapped
+// rather than unmapping and remapping the whole chunk to get requested alignment.
+char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+  size_t extra_size = calculate_aligned_extra_size(size, alignment);
+  char* extra_base = os::reserve_memory(extra_size);
+  if (extra_base == NULL) {
+    return NULL;
+  }
+  return chop_extra_memory(size, alignment, extra_base, extra_size);
+}
+
+char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc) {
+  size_t extra_size = calculate_aligned_extra_size(size, alignment);
+  // For file mapping, we do not call os::map_memory_to_file(size, fd) since:
+  //   - we later chop away parts of the mapping using os::release_memory and that could fail if the
+  //     original mmap call had been tied to an fd.
+  //   - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
+  //     mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
+  //     chopping off and unmapping excess bits back and front (see below) would not work.
+  char* extra_base = reserve_mmapped_memory(extra_size, NULL);
+  if (extra_base == NULL) {
+    return NULL;
+  }
+  char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size);
+  // After we have an aligned address, we can replace anonymous mapping with file mapping
+  if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
+    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
   }
+  MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
   return aligned_base;
 }
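Outside of HotSpot, the over-reserve-then-chop pattern that calculate_aligned_extra_size() and chop_extra_memory() implement can be reproduced with raw POSIX calls. The sketch below is illustrative only: the helper name reserve_aligned_sketch is made up, and it skips the os:: wrappers and NMT bookkeeping from the diff. It shows why reserving size + alignment bytes always leaves an aligned window that can be trimmed in place rather than unmapped and remapped.

// Minimal standalone sketch of the "reserve extra, then chop" alignment
// trick, assuming Linux-style MAP_ANONYMOUS (MAP_ANON on some BSDs).
// Not the HotSpot implementation.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static char* reserve_aligned_sketch(size_t size, size_t alignment) {
  // Over-reserve by 'alignment' so an aligned address is guaranteed to
  // exist somewhere inside the mapping (calculate_aligned_extra_size()).
  size_t extra_size = size + alignment;
  assert(extra_size >= size && "overflow, size is too large to allow alignment");

  char* extra_base = (char*) mmap(NULL, extra_size, PROT_NONE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (extra_base == MAP_FAILED) {
    return NULL;
  }

  // Round up to the next multiple of 'alignment' (a power of two).
  uintptr_t raw = (uintptr_t) extra_base;
  char* aligned_base =
      (char*) ((raw + alignment - 1) & ~(uintptr_t)(alignment - 1));

  // Unmap only the slack before and after the aligned window, as
  // chop_extra_memory() does. Unmapping everything and remapping at the
  // aligned address would open a race: another thread could grab the
  // range in between, and a later MAP_FIXED would silently clobber it.
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset = extra_size - begin_offset - size;
  if (begin_offset > 0) {
    munmap(extra_base, begin_offset);
  }
  if (end_offset > 0) {
    munmap(aligned_base + size, end_offset);
  }
  return aligned_base;
}

int main() {
  const size_t size = 16 * 4096, alignment = 64 * 4096;
  char* p = reserve_aligned_sketch(size, alignment);
  printf("aligned base: %p (aligned: %d)\n", (void*) p,
         p != NULL && ((uintptr_t) p % alignment) == 0);
  return 0;
}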
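The file-backed path relies on one more POSIX property: mmap() with MAP_FIXED atomically replaces whatever is already mapped at the target address, which is what lets map_memory_to_file_aligned() swap its anonymous placeholder for a file mapping without giving up the aligned address. A minimal sketch of that replacement step follows; the helper name is hypothetical and this is not HotSpot's replace_existing_mapping_with_file_mapping(), just the idea behind it.

// Sketch of replacing an anonymous reservation with a file mapping via
// MAP_FIXED. Hypothetical stand-alone code, not the HotSpot implementation.
#include <sys/mman.h>
#include <cstddef>

static char* map_file_over_range_sketch(char* base, size_t size, int fd) {
  // MAP_FIXED replaces whatever is mapped at [base, base + size). This is
  // safe here only because the caller already owns that reservation; used
  // on an arbitrary address it would clobber unrelated mappings.
  void* addr = mmap(base, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, fd, 0);
  return addr == MAP_FAILED ? NULL : (char*) addr;
}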