@@ -3898,8 +3898,8 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
   return addr;
 }
 
-static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
-                                        int error) {
+static void warn_on_commit_special_failure(char* req_addr, size_t bytes,
+                                           size_t page_size, int error) {
   assert(error == ENOMEM, "Only expect to fail if no memory is available");
 
   bool warn_on_failure = UseLargePages &&
@@ -3909,139 +3909,101 @@ static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
 
   if (warn_on_failure) {
     char msg[128];
-    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
-                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: "
+                 PTR_FORMAT " bytes: " SIZE_FORMAT " page size: "
+                 SIZE_FORMAT " (errno = %d).",
+                 req_addr, bytes, page_size, error);
     warning("%s", msg);
   }
 }
 
-char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
-                                                        char* req_addr,
-                                                        bool exec) {
-  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
-  assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
-  assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
+bool os::Linux::commit_memory_special(size_t bytes,
+                                      size_t page_size,
+                                      char* req_addr,
+                                      bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");
+  assert(is_aligned(bytes, page_size), "Unaligned size");
+  assert(is_aligned(req_addr, page_size), "Unaligned address");
+  assert(req_addr != NULL, "Must have a requested address for special mappings");
 
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-  int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB;
-  // Ensure the correct page size flag is used when needed.
-  flags |= hugetlbfs_page_size_flag(os::large_page_size());
+  int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
 
+  // For large pages additional flags are required.
+  if (page_size > (size_t) os::vm_page_size()) {
+    flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
+  }
   char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
 
   if (addr == MAP_FAILED) {
-    warn_on_large_pages_failure(req_addr, bytes, errno);
-    return NULL;
+    warn_on_commit_special_failure(req_addr, bytes, page_size, errno);
+    return false;
   }
 
-  assert(is_aligned(addr, os::large_page_size()), "Must be");
-
-  return addr;
+  log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="
+                      SIZE_FORMAT "%s",
+                      p2i(addr), byte_size_in_exact_unit(bytes),
+                      exact_unit_for_byte_size(bytes),
+                      byte_size_in_exact_unit(page_size),
+                      exact_unit_for_byte_size(page_size));
+  assert(is_aligned(addr, page_size), "Must be");
+  return true;
 }
 
-// Reserve memory using mmap(MAP_HUGETLB).
-//  - bytes shall be a multiple of alignment.
-//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
-//  - alignment sets the alignment at which memory shall be allocated.
-//    It must be a multiple of allocation granularity.
-// Returns address of memory or NULL. If req_addr was not NULL, will only return
-// req_addr or NULL.
-char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
-                                                         size_t alignment,
-                                                         char* req_addr,
-                                                         bool exec) {
-  size_t large_page_size = os::large_page_size();
-  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
-
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
+                                                   size_t alignment,
+                                                   char* req_addr,
+                                                   bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
   assert(is_aligned(req_addr, alignment), "Must be");
-  assert(is_aligned(bytes, alignment), "Must be");
-
-  // First reserve - but not commit - the address range in small pages.
-  char* const start = anon_mmap_aligned(req_addr, bytes, alignment);
-
-  if (start == NULL) {
-    return NULL;
-  }
-
-  assert(is_aligned(start, alignment), "Must be");
-
-  char* end = start + bytes;
-
-  // Find the regions of the allocated chunk that can be promoted to large pages.
-  char* lp_start = align_up(start, large_page_size);
-  char* lp_end   = align_down(end, large_page_size);
-
-  size_t lp_bytes = lp_end - lp_start;
-
-  assert(is_aligned(lp_bytes, large_page_size), "Must be");
+  assert(is_aligned(req_addr, os::large_page_size()), "Must be");
+  assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
 
-  if (lp_bytes == 0) {
-    // The mapped region doesn't even span the start and the end of a large page.
-    // Fall back to allocate a non-special area.
-    ::munmap(start, end - start);
+  // We only end up here when at least 1 large page can be used.
+  // If the size is not a multiple of the large page size, we
+  // will mix the type of pages used, but in a descending order.
+  // Start off by reserving a range of the given size that is
+  // properly aligned. At this point no pages are committed. If
+  // a requested address is given it will be used and it must be
+  // aligned to both the large page size and the given alignment.
+  // The larger of the two will be used.
+  size_t required_alignment = MAX(os::large_page_size(), alignment);
+  char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);
+  if (aligned_start == NULL) {
     return NULL;
   }
 
-  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-  int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
-  void* result;
+  // First commit using large pages.
+  size_t large_bytes = align_down(bytes, os::large_page_size());
+  bool large_committed = commit_memory_special(large_bytes, os::large_page_size(), aligned_start, exec);
 
-  // Commit small-paged leading area.
-  if (start != lp_start) {
-    result = ::mmap(start, lp_start - start, prot, flags, -1, 0);
-    if (result == MAP_FAILED) {
-      ::munmap(lp_start, end - lp_start);
-      return NULL;
-    }
+  if (large_committed && bytes == large_bytes) {
+    // The size was large page aligned so no additional work is
+    // needed even if the commit failed.
+    return aligned_start;
   }
 
-  // Commit large-paged area.
-  flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(os::large_page_size());
-
-  result = ::mmap(lp_start, lp_bytes, prot, flags, -1, 0);
-  if (result == MAP_FAILED) {
-    warn_on_large_pages_failure(lp_start, lp_bytes, errno);
-    // If the mmap above fails, the large pages region will be unmapped and we
-    // have regions before and after with small pages. Release these regions.
-    //
-    // |  mapped  |  unmapped  |  mapped |
-    // ^          ^            ^        ^
-    // start      lp_start     lp_end   end
-    //
-    ::munmap(start, lp_start - start);
-    ::munmap(lp_end, end - lp_end);
+  // The requested size requires some small pages as well.
+  char* small_start = aligned_start + large_bytes;
+  size_t small_size = bytes - large_bytes;
+  if (!large_committed) {
+    // Failed to commit large pages, so we need to unmap the
+    // remainder of the original reservation.
+    ::munmap(small_start, small_size);
     return NULL;
   }
 
-  // Commit small-paged trailing area.
-  if (lp_end != end) {
-    result = ::mmap(lp_end, end - lp_end, prot,
-                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-                    -1, 0);
-    if (result == MAP_FAILED) {
-      ::munmap(start, lp_end - start);
-      return NULL;
-    }
-  }
-
-  return start;
-}
-
-char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
-                                                   size_t alignment,
-                                                   char* req_addr,
-                                                   bool exec) {
-  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
-  assert(is_aligned(req_addr, alignment), "Must be");
-  assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
-  assert(is_power_of_2(os::large_page_size()), "Must be");
-  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
-
-  if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
-    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
-  } else {
-    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  // Commit the remaining bytes using small pages.
+  bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);
+  if (!small_committed) {
+    // Failed to commit the remaining size, need to unmap
+    // the large pages part of the reservation.
+    ::munmap(aligned_start, large_bytes);
+    return NULL;
   }
+  return aligned_start;
 }
 
 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment,
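
For readers unfamiliar with the HugeTLBFS path, here is a minimal standalone sketch of the same strategy in plain POSIX terms: reserve the whole range without committing, commit the large-page-aligned prefix with MAP_HUGETLB, then commit the small-page tail with MAP_FIXED over the same reservation. For example, an 11 MB request with 2 MB large pages becomes a 10 MB huge-page prefix plus a 1 MB small-page tail. This is not HotSpot code; the helper names (reserve_aligned, commit_range, reserve_mixed), the hard-coded 4 KB/2 MB page sizes, and the simplified whole-range roll-back on failure are assumptions for illustration only.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Assumed page sizes for the example; real code queries the OS.
static const size_t kSmallPage = 4 * 1024;
static const size_t kLargePage = 2 * 1024 * 1024;

// Reserve (but do not commit) `bytes` at an `alignment`-aligned address by
// over-reserving and trimming, roughly what anon_mmap_aligned() does above.
static char* reserve_aligned(size_t bytes, size_t alignment) {
  size_t extra = bytes + alignment;
  void* raw = ::mmap(nullptr, extra, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (raw == MAP_FAILED) {
    return nullptr;
  }
  uintptr_t base = reinterpret_cast<uintptr_t>(raw);
  uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  if (aligned > base) {
    ::munmap(raw, aligned - base);                            // trim unaligned head
  }
  size_t tail = (base + extra) - (aligned + bytes);
  if (tail > 0) {
    ::munmap(reinterpret_cast<void*>(aligned + bytes), tail); // trim unused tail
  }
  return reinterpret_cast<char*>(aligned);
}

// Commit part of an existing reservation in place, optionally with huge pages.
static bool commit_range(char* addr, size_t bytes, size_t page_size) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
  if (page_size > kSmallPage) {
    flags |= MAP_HUGETLB;                                     // large pages need the extra flag
  }
  return ::mmap(addr, bytes, PROT_READ | PROT_WRITE, flags, -1, 0) != MAP_FAILED;
}

// Commit a large-page prefix and a small-page tail of a fresh reservation.
static char* reserve_mixed(size_t bytes) {
  char* start = reserve_aligned(bytes, kLargePage);
  if (start == nullptr) {
    return nullptr;
  }
  size_t large_bytes = bytes - (bytes % kLargePage);          // align_down(bytes, kLargePage)
  bool ok = (large_bytes == 0 || commit_range(start, large_bytes, kLargePage)) &&
            (large_bytes == bytes ||
             commit_range(start + large_bytes, bytes - large_bytes, kSmallPage));
  if (!ok) {
    ::munmap(start, bytes);                                   // roll back the whole range on failure
    return nullptr;
  }
  return start;
}

int main() {
  // 11 MB request: a 10 MB huge-page prefix plus a 1 MB small-page tail.
  // Returns 1 if no huge pages are configured on the machine.
  return reserve_mixed(11 * 1024 * 1024) != nullptr ? 0 : 1;
}

Committing with MAP_FIXED on top of an existing anonymous reservation keeps the range contiguous, which is why the failure paths in the patch above can simply munmap whatever part of the reservation was never committed.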