@@ -52,9 +52,9 @@ GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
 
 // The following are offsets from buffer_bottom()
 size_t ArchiveHeapWriter::_buffer_used;
-size_t ArchiveHeapWriter::_heap_roots_offset;
-size_t ArchiveHeapWriter::_heap_roots_word_size;
+
+// Heap root segments
+HeapRootSegments ArchiveHeapWriter::_heap_root_segments;
 
 address ArchiveHeapWriter::_requested_bottom;
 address ArchiveHeapWriter::_requested_top;
@@ -164,10 +164,6 @@ address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr
   return _requested_bottom + buffered_address_to_offset(buffered_addr);
 }
 
-oop ArchiveHeapWriter::heap_roots_requested_address() {
-  return cast_to_oop(_requested_bottom + _heap_roots_offset);
-}
-
 address ArchiveHeapWriter::requested_address() {
   assert(_buffer != nullptr, "must be initialized");
   return _requested_bottom;
@@ -186,47 +182,71 @@ void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
   _buffer->at_grow(to_array_index(min_bytes));
 }
 
-void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
-  Klass* k = Universe::objectArrayKlass(); // already relocated to point to archived klass
-  int length = roots->length();
-  _heap_roots_word_size = objArrayOopDesc::object_size(length);
-  size_t byte_size = _heap_roots_word_size * HeapWordSize;
-  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
-    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
-    vm_exit(1);
-  }
+objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
+  HeapWord* mem = offset_to_buffered_address<HeapWord*>(offset);
+  memset(mem, 0, objArrayOopDesc::object_size(element_count));
 
-  maybe_fill_gc_region_gap(byte_size);
-
-  size_t new_used = _buffer_used + byte_size;
-  ensure_buffer_space(new_used);
+  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
+  oopDesc::set_mark(mem, markWord::prototype());
+  oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
+  arrayOopDesc::set_length(mem, element_count);
+  return objArrayOop(cast_to_oop(mem));
+}
 
-  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
-  memset(mem, 0, byte_size);
-  {
-    // This is copied from MemAllocator::finish
-    oopDesc::set_mark(mem, markWord::prototype());
-    oopDesc::release_set_klass(mem, k);
-  }
-  {
-    // This is copied from ObjArrayAllocator::initialize
-    arrayOopDesc::set_length(mem, length);
+void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
+  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
+  if (UseCompressedOops) {
+    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
+  } else {
+    *segment->obj_at_addr<oop>(index) = root;
   }
+}
 
-  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
-  for (int i = 0; i < length; i++) {
-    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
-    oop o = roots->at(i);
-    if (UseCompressedOops) {
-      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
-    } else {
-      *arrayOop->obj_at_addr<oop>(i) = o;
+void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
+  // Depending on the number of classes we are archiving, a single roots array may be
+  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
+  // allows us to chop the large array into a series of "segments". Current layout
+  // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
+  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
+  // This is simple and efficient. We do not need filler objects anywhere between the segments,
+  // or immediately after the last segment. This allows starting the object dump immediately
+  // after the roots.
+
+  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
+         "Pre-condition: Roots start at aligned boundary: " SIZE_FORMAT, _buffer_used);
+
+  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
+  assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
+         "Should match exactly");
+
+  HeapRootSegments segments(_buffer_used,
+                            roots->length(),
+                            MIN_GC_REGION_ALIGNMENT,
+                            max_elem_count);
+
+  int root_index = 0;
+  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
+    int size_elems = segments.size_in_elems(seg_idx);
+    size_t size_bytes = segments.size_in_bytes(seg_idx);
+
+    size_t oop_offset = _buffer_used;
+    _buffer_used = oop_offset + size_bytes;
+    ensure_buffer_space(_buffer_used);
+
+    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
+           "Roots segment " SIZE_FORMAT " start is not aligned: " SIZE_FORMAT,
+           segments.count(), oop_offset);
+
+    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
+    for (int i = 0; i < size_elems; i++) {
+      root_segment_at_put(seg_oop, i, roots->at(root_index++));
     }
+
+    log_info(cds, heap)("archived obj root segment [%d] = " SIZE_FORMAT " bytes, obj = " PTR_FORMAT,
+                        size_elems, size_bytes, p2i(seg_oop));
   }
-  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
 
-  _heap_roots_offset = _buffer_used;
-  _buffer_used = new_used;
+  _heap_root_segments = segments;
 }
 
 static int oop_sorting_rank(oop o) {
@@ -282,6 +302,10 @@ void ArchiveHeapWriter::sort_source_objs() {
 }
 
 void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
+  // There could be multiple root segments, which we want to be aligned by region.
+  // Putting them ahead of objects makes sure we waste no space.
+  copy_roots_to_buffer(roots);
+
   sort_source_objs();
   for (int i = 0; i < _source_objs_order->length(); i++) {
     int src_obj_index = _source_objs_order->at(i)._index;
@@ -295,8 +319,6 @@ void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtCla
     _buffer_offset_to_source_obj_table->maybe_grow();
   }
 
-  copy_roots_to_buffer(roots);
-
   log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
                 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
 }
@@ -455,7 +477,7 @@ void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
 
   info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                     offset_to_buffered_address<HeapWord*>(_buffer_used)));
-  info->set_heap_roots_offset(_heap_roots_offset);
+  info->set_heap_root_segments(_heap_root_segments);
 }
 
 // Oop relocation
@@ -543,12 +565,6 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
   }
 }
 
-// Relocate an element in the buffered copy of HeapShared::roots()
-template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
-  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
-  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
-}
-
 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
   oop _src_obj;
   address _buffered_obj;
@@ -600,14 +616,24 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
 
   // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
   // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
-  oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
-  update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlass());
-  int length = roots != nullptr ? roots->length() : 0;
-  for (int i = 0; i < length; i++) {
+  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
+    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);
+
+    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
+    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
+    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
+    int length = _heap_root_segments.size_in_elems(seg_idx);
+
     if (UseCompressedOops) {
-      relocate_root_at<narrowOop>(requested_roots, i, heap_info->oopmap());
+      for (int i = 0; i < length; i++) {
+        narrowOop* addr = (narrowOop*)(buffered_obj + objArrayOopDesc::obj_at_offset<narrowOop>(i));
+        relocate_field_in_buffer<narrowOop>(addr, heap_info->oopmap());
+      }
     } else {
-      relocate_root_at<oop>(requested_roots, i, heap_info->oopmap());
+      for (int i = 0; i < length; i++) {
+        oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
+        relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
+      }
     }
   }
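The comment block in copy_roots_to_buffer() above describes how the roots array is chopped into region-aligned segments. As a rough illustration of that layout math (not part of this change, and not the HeapRootSegments implementation), the standalone sketch below reproduces the same arithmetic with assumed constants: a 4 KB MIN_GC_REGION_ALIGNMENT, a 16-byte objArrayOop header, 4-byte compressed oops, and an 8-byte heap word. The real values come from the VM via arrayOopDesc::header_size_in_bytes(), heapOopSize, and HeapWordSize.

#include <cstdio>
#include <cstddef>

// Standalone sketch of the segment layout described in copy_roots_to_buffer().
// All constants below are assumptions for the example, not values read from the VM.
static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
  const size_t region_bytes = 4096;  // assumed MIN_GC_REGION_ALIGNMENT
  const size_t header_bytes = 16;    // assumed objArrayOop header size
  const size_t oop_bytes    = 4;     // assumed heapOopSize (compressed oops)
  const size_t word_bytes   = 8;     // assumed HeapWordSize

  // Mirrors max_elem_count in the patch: the largest objArrayOop that still
  // fits exactly into one MIN_GC_REGION_ALIGNMENT-sized region.
  const size_t max_elem_count = (region_bytes - header_bytes) / oop_bytes;  // 1020 here

  const size_t roots = 2500;  // hypothetical number of archived heap roots

  // Zero or more full segments, then at most one short tail segment.
  const size_t full_segments = roots / max_elem_count;
  const size_t tail_elems    = roots % max_elem_count;
  const size_t segment_count = full_segments + (tail_elems != 0 ? 1 : 0);

  printf("%zu roots -> %zu segment(s), max %zu roots per segment\n",
         roots, segment_count, max_elem_count);
  for (size_t i = 0; i < segment_count; i++) {
    size_t elems = (i < full_segments) ? max_elem_count : tail_elems;
    size_t bytes = (i < full_segments)
                       ? region_bytes
                       : align_up(header_bytes + elems * oop_bytes, word_bytes);
    // Segments are packed back to back from the aligned start of the buffer,
    // so segment i begins at offset i * region_bytes; only the last may be short.
    // A hypothetical reader resolving global root #n would use segment
    // n / max_elem_count, element n % max_elem_count, matching this chopping rule.
    printf("  segment %zu: offset=%zu elems=%zu bytes=%zu\n",
           i, i * region_bytes, elems, bytes);
  }
  return 0;
}

With these assumed numbers, 2500 roots become two full 4096-byte segments plus a 460-element tail, so every segment starts on a region boundary and no filler objects are needed between the segments or after the last one, which is the invariant the asserts in copy_roots_to_buffer() check.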