@@ -393,7 +393,84 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
#if INCLUDE_SERIALGC

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  scan_and_forward(this, cp);
+  // Compute the new addresses for the live objects and store them in the mark
+  // Used by universe::mark_sweep_phase2()
+
+  // We're sure to be here before any objects are compacted into this
+  // space, so this is a good time to initialize this:
+  set_compaction_top(bottom());
+
+  if (cp->space == NULL) {
+    assert(cp->gen != NULL, "need a generation");
+    assert(cp->threshold == NULL, "just checking");
+    assert(cp->gen->first_compaction_space() == this, "just checking");
+    cp->space = cp->gen->first_compaction_space();
+    cp->threshold = cp->space->initialize_threshold();
+    cp->space->set_compaction_top(cp->space->bottom());
+  }
+
+  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
+
+  DeadSpacer dead_spacer(this);
+
+  HeapWord* end_of_live = bottom(); // One byte beyond the last byte of the last live object.
+  HeapWord* first_dead = NULL;      // The first dead object.
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  HeapWord* cur_obj = bottom();
+  HeapWord* scan_limit = top();
+
+  while (cur_obj < scan_limit) {
+    if (cast_to_oop(cur_obj)->is_gc_marked()) {
+      // prefetch beyond cur_obj
+      Prefetch::write(cur_obj, interval);
+      size_t size = cast_to_oop(cur_obj)->size();
+      compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
+      cur_obj += size;
+      end_of_live = cur_obj;
+    } else {
+      // run over all the contiguous dead objects
+      HeapWord* end = cur_obj;
+      do {
+        // prefetch beyond end
+        Prefetch::write(end, interval);
+        end += cast_to_oop(end)->size();
+      } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
+
+      // see if we might want to pretend this object is alive so that
+      // we don't have to compact quite as often.
+      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
+        oop obj = cast_to_oop(cur_obj);
+        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
+        end_of_live = end;
+      } else {
+        // otherwise, it really is a free region.
+
+        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
+        *(HeapWord**)cur_obj = end;
+
+        // see if this is the first dead region.
+        if (first_dead == NULL) {
+          first_dead = cur_obj;
+        }
+      }
+
+      // move on to the next object
+      cur_obj = end;
+    }
+  }
+
+  assert(cur_obj == scan_limit, "just checking");
+  _end_of_live = end_of_live;
+  if (first_dead != NULL) {
+    _first_dead = first_dead;
+  } else {
+    _first_dead = end_of_live;
+  }
+
+  // save the compaction_top of the compaction space.
+  cp->space->set_compaction_top(compact_top);
}
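The pass above is the first phase of the classic LISP-2 sliding compaction: one linear scan assigns each live object its post-compaction address via forward(), and each run of dead objects is overwritten with a pointer to the next live object so the later phases can cross it with a single load. The sketch below is a minimal stand-alone illustration of that bookkeeping; it is not HotSpot code, and its model is invented for this example (object indices instead of HeapWord addresses, and a separate skip array standing in for the pointer the real code stores in the dead object's first word):

```c++
#include <cstddef>
#include <cstdio>
#include <vector>

// Toy heap: a contiguous sequence of "objects", each with a size in words,
// a mark bit, and a slot for its post-compaction address.
struct ToyObj { size_t size; bool marked; size_t new_addr; };

int main() {
  // Five contiguous objects; objects 1 and 3 are dead.
  std::vector<ToyObj> heap = {
    {4, true, 0}, {2, false, 0}, {3, true, 0}, {5, false, 0}, {1, true, 0}
  };
  std::vector<size_t> skip(heap.size(), 0); // stands in for *(HeapWord**)cur_obj = end

  size_t compact_top = 0;          // next free word in the compacted image
  size_t first_dead  = heap.size();

  for (size_t i = 0; i < heap.size(); ) {
    if (heap[i].marked) {
      heap[i].new_addr = compact_top;   // "forward" the live object
      compact_top += heap[i].size;
      i++;
    } else {
      // run over all the contiguous dead objects
      size_t end = i;
      while (end < heap.size() && !heap[end].marked) end++;
      skip[i] = end;                    // thread the dead run to the next live object
      if (first_dead == heap.size()) first_dead = i;
      i = end;
    }
  }

  printf("first_dead = %zu\n", first_dead);
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked) printf("obj %zu -> new addr %zu\n", i, heap[i].new_addr);
    else                printf("obj %zu dead, skip to %zu\n", i, skip[i]);
  }
  return 0;
}
```

In the real collector the shortcut lives in the dead memory itself, so this phase needs no auxiliary data structure at all.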

void CompactibleSpace::adjust_pointers() {
@@ -402,11 +479,94 @@ void CompactibleSpace::adjust_pointers() {
    return;   // Nothing to do.
  }

-  scan_and_adjust_pointers(this);
+  // adjust all the interior pointers to point at the new locations of objects
+  // Used by MarkSweep::mark_sweep_phase3()
+
+  HeapWord* cur_obj = bottom();
+  HeapWord* const end_of_live = _end_of_live; // Established by prepare_for_compaction().
+  HeapWord* const first_dead = _first_dead;   // Established by prepare_for_compaction().
+
+  assert(first_dead <= end_of_live, "Stands to reason, no?");
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  debug_only(HeapWord* prev_obj = NULL);
+  while (cur_obj < end_of_live) {
+    Prefetch::write(cur_obj, interval);
+    if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
+      // cur_obj is alive
+      // point all the oops to the new location
+      size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
+      debug_only(prev_obj = cur_obj);
+      cur_obj += size;
+    } else {
+      debug_only(prev_obj = cur_obj);
+      // cur_obj is not a live object, instead it points at the next live object
+      cur_obj = *(HeapWord**)cur_obj;
+      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
+    }
+  }
+
+  assert(cur_obj == end_of_live, "just checking");
}
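Two properties keep this pass cheap: everything below first_dead is known to be live, so the mark check is skipped there, and every dead run is crossed with one load of the pointer stored by prepare_for_compaction(). A minimal sketch of that traversal shape, under the same invented object-index model as above (not HotSpot code; the real loop advances by the object's size in words and rewrites interior oops via MarkSweep::adjust_pointers()):

```c++
#include <cassert>
#include <cstdio>
#include <vector>

struct ToyObj { size_t size; bool marked; size_t skip; }; // skip: next live index, if dead

int main() {
  // Layout produced by the toy forwarding pass: objects 1 and 3 are dead runs.
  std::vector<ToyObj> heap = {
    {4, true, 0}, {2, false, 2}, {3, true, 0}, {5, false, 4}, {1, true, 0}
  };
  const size_t first_dead  = 1;
  const size_t end_of_live = heap.size();

  size_t cur = 0;
  while (cur < end_of_live) {
    if (cur < first_dead || heap[cur].marked) {
      // live: this is where each interior pointer would be rewritten to the
      // target object's forwarded (new) address
      printf("adjusting pointers inside live obj %zu\n", cur);
      cur += 1; // the real code advances by the object's size in words
    } else {
      size_t prev = cur;
      cur = heap[cur].skip; // one load jumps the whole dead run
      assert(cur > prev && "we should be moving forward through memory");
    }
  }
  return 0;
}
```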

void CompactibleSpace::compact() {
-  scan_and_compact(this);
+  // Copy all live objects to their new location
+  // Used by MarkSweep::mark_sweep_phase4()
+
+  verify_up_to_first_dead(this);
+
+  HeapWord* const start = bottom();
+  HeapWord* const end_of_live = _end_of_live;
+
+  assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
+  if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
+    // Nothing to compact. The space is either empty or all live objects should be left in place.
+    clear_empty_region(this);
+    return;
+  }
+
+  const intx scan_interval = PrefetchScanIntervalInBytes;
+  const intx copy_interval = PrefetchCopyIntervalInBytes;
+
+  assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
+  HeapWord* cur_obj = start;
+  if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
+    // All objects before _first_dead can be skipped. They should not be moved.
+    // A pointer to the first live object is stored at the memory location for _first_dead.
+    cur_obj = *(HeapWord**)(_first_dead);
+  }
+
+  debug_only(HeapWord* prev_obj = NULL);
+  while (cur_obj < end_of_live) {
+    if (!cast_to_oop(cur_obj)->is_gc_marked()) {
+      debug_only(prev_obj = cur_obj);
+      // The first word of the dead object contains a pointer to the next live object or end of space.
+      cur_obj = *(HeapWord**)cur_obj;
+      assert(cur_obj > prev_obj, "we should be moving forward through memory");
+    } else {
+      // prefetch beyond cur_obj
+      Prefetch::read(cur_obj, scan_interval);
+
+      // size and destination
+      size_t size = cast_to_oop(cur_obj)->size();
+      HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
+
+      // prefetch beyond compaction_top
+      Prefetch::write(compaction_top, copy_interval);
+
+      // copy object and reinit its mark
+      assert(cur_obj != compaction_top, "everything in this pass should be moving");
+      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
+      cast_to_oop(compaction_top)->init_mark();
+      assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class");
+
+      debug_only(prev_obj = cur_obj);
+      cur_obj += size;
+    }
+  }
+
+  clear_empty_region(this);
}
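Because compaction only slides objects toward bottom(), every destination lies at or below its source, which is why a single overlap-safe forward copy per object (Copy::aligned_conjoint_words() above) suffices and no temporary buffer is needed. Below is a stand-alone sketch of that sliding step; the flat word array, the Extent record, and the sample data are all invented for illustration, with memmove playing the role of the HotSpot copy routine:

```c++
#include <cassert>
#include <cstdio>
#include <cstring>
#include <vector>

// Sliding compaction over a flat word array: each live extent is copied down
// to its precomputed destination. Destinations never exceed sources, so an
// overlapping forward copy (memmove) is safe.
struct Extent { size_t from, to, size; }; // word offsets + length in words

int main() {
  // Three live objects (runs of 1s, 2s, and a 3) separated by dead words (0s).
  std::vector<long> space = {1,1,1,1, 0,0, 2,2,2, 0,0,0,0,0, 3};
  // Forwarding decisions from the toy phase-2 pass: {src, dst, len}.
  std::vector<Extent> live = { {0,0,4}, {6,4,3}, {14,7,1} };

  size_t compact_top = 0;
  for (const Extent& e : live) {
    assert(e.to <= e.from && "everything slides toward bottom()");
    std::memmove(&space[e.to], &space[e.from], e.size * sizeof(long));
    compact_top = e.to + e.size;
  }

  printf("compacted %zu live words:", compact_top);
  for (size_t i = 0; i < compact_top; i++) printf(" %ld", space[i]);
  printf("\n"); // expected: 1 1 1 1 2 2 2 3
  return 0;
}
```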
#endif // INCLUDE_SERIALGC