@@ -3728,7 +3728,10 @@ Node* GraphKit::new_instance(Node* klass_node,
 //-------------------------------new_array-------------------------------------
 // helper for both newarray and anewarray
 // The 'length' parameter is (obviously) the length of the array.
-// See comments on new_instance for the meaning of the other arguments.
+// The optional arguments are for specialized use by intrinsics:
+//  - If 'return_size_val', report the non-padded array size (sum of header size
+//    and array body) to the caller.
+//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
 Node* GraphKit::new_array(Node* klass_node,   // array klass (maybe variable)
                           Node* length,       // number of array elements
                           int   nargs,        // number of arguments to push back for uncommon trap
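For callers, the visible change is in what the out-parameter reports: 'return_size_val' now carries the un-padded size (header plus array body) rather than the alignment-rounded allocation size. A hedged sketch of a hypothetical intrinsic requesting the size (identifiers below are illustrative, not taken from this patch):

  // Illustrative caller inside a GraphKit-based intrinsic expansion (names are made up):
  Node* obj_size = nullptr;
  Node* arr = new_array(array_klass_node, length_node, /*nargs=*/ 0, &obj_size);
  // After this change, obj_size is header + body without MinObjAlignment padding,
  // i.e. the number of bytes a copy of the array payload should cover.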
@@ -3779,25 +3782,21 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
   // The rounding mask is strength-reduced, if possible.
   int round_mask = MinObjAlignmentInBytes - 1;
   Node* header_size = nullptr;
-  int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
   // (T_BYTE has the weakest alignment and size restrictions...)
   if (layout_is_con) {
     int hsize  = Klass::layout_helper_header_size(layout_con);
     int eshift = Klass::layout_helper_log2_element_size(layout_con);
-    BasicType etype = Klass::layout_helper_element_type(layout_con);
     if ((round_mask & ~right_n_bits(eshift)) == 0)
       round_mask = 0;  // strength-reduce it if it goes away completely
     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
+    int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
     assert(header_size_min <= hsize, "generic minimum is smallest");
-    header_size_min = hsize;
-    header_size = intcon(hsize + round_mask);
+    header_size = intcon(hsize);
   } else {
     Node* hss = intcon(Klass::_lh_header_size_shift);
     Node* hsm = intcon(Klass::_lh_header_size_mask);
-    Node* hsize = _gvn.transform(new URShiftINode(layout_val, hss));
-    hsize = _gvn.transform(new AndINode(hsize, hsm));
-    Node* mask = intcon(round_mask);
-    header_size = _gvn.transform(new AddINode(hsize, mask));
+    header_size = _gvn.transform(new URShiftINode(layout_val, hss));
+    header_size = _gvn.transform(new AndINode(header_size, hsm));
   }

   Node* elem_shift = nullptr;
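In both branches, header_size now holds just the header bytes; the '+ round_mask' term the old code folded in here moves to the final alignment step in the next hunk. A plain-C++ sketch (not from the patch; the constants are stand-ins, the real values live in Klass) of what the URShiftINode/AndINode pair above computes:

  // Stand-in constants; assumed placeholders for Klass::_lh_header_size_shift
  // and Klass::_lh_header_size_mask.
  const int kHeaderSizeShift = 16;
  const int kHeaderSizeMask  = 0xFF;

  // Extract the header-size field of a layout helper word; no rounding mask
  // is added to it any more.
  int decode_header_size(int layout_val) {
    return (layout_val >> kHeaderSizeShift) & kHeaderSizeMask;
  }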
@@ -3849,25 +3848,30 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
   }
 #endif

-  // Combine header size (plus rounding) and body size. Then round down.
-  // This computation cannot overflow, because it is used only in two
-  // places, one where the length is sharply limited, and the other
-  // after a successful allocation.
+  // Combine header size and body size for the array copy part, then align (if
+  // necessary) for the allocation part. This computation cannot overflow,
+  // because it is used only in two places, one where the length is sharply
+  // limited, and the other after a successful allocation.
   Node* abody = lengthx;
-  if (elem_shift != nullptr)
-    abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
-  Node* size = _gvn.transform(new AddXNode(headerx, abody));
-  if (round_mask != 0) {
-    Node* mask = MakeConX(~round_mask);
-    size = _gvn.transform(new AndXNode(size, mask));
+  if (elem_shift != nullptr) {
+    abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
   }
-  // else if round_mask == 0, the size computation is self-rounding
+  Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));

   if (return_size_val != nullptr) {
     // This is the size
-    (*return_size_val) = size;
+    (*return_size_val) = non_rounded_size;
   }

+  Node* size = non_rounded_size;
+  if (round_mask != 0) {
+    Node* mask1 = MakeConX(round_mask);
+    size = _gvn.transform(new AddXNode(size, mask1));
+    Node* mask2 = MakeConX(~round_mask);
+    size = _gvn.transform(new AndXNode(size, mask2));
+  }
+  // else if round_mask == 0, the size computation is self-rounding
+
   // Now generate allocation code

   // The entire memory state is needed for slow path of the allocation
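Net effect of this hunk: the single rounded 'size' is split into 'non_rounded_size' (what return_size_val now reports) and 'size' (what the allocation itself uses), with the add-then-mask alignment applied only to the latter. A small worked example under assumed values (an int[5], 16-byte array header, 8-byte object alignment), not taken from the patch:

  #include <cassert>

  int main() {
    long header     = 16;      // assumed array header size in bytes
    long body       = 5 * 4;   // lengthx << elem_shift for an int[5]
    long round_mask = 8 - 1;   // assumed MinObjAlignmentInBytes - 1

    long non_rounded_size = header + body;                                 // 36, reported via return_size_val
    long size             = (non_rounded_size + round_mask) & ~round_mask; // 40, used for the allocation

    assert(non_rounded_size == 36 && size == 40);
    return 0;
  }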