39
39
#include " gc/g1/g1OopClosures.hpp"
40
40
#include " gc/g1/g1Policy.hpp"
41
41
#include " gc/g1/g1RegionMarkStatsCache.inline.hpp"
42
+ #include " gc/shared/gcForwarding.hpp"
42
43
#include " gc/shared/gcTraceTime.inline.hpp"
43
44
#include " gc/shared/preservedMarks.hpp"
44
45
#include " gc/shared/referenceProcessor.hpp"
45
- #include " gc/shared/slidingForwarding.hpp"
46
46
#include " gc/shared/verifyOption.hpp"
47
47
#include " gc/shared/weakProcessor.inline.hpp"
48
48
#include " gc/shared/workerPolicy.hpp"
@@ -208,6 +208,7 @@ void G1FullCollector::collect() {
208
208
// Don't add any more derived pointers during later phases
209
209
deactivate_derived_pointers ();
210
210
211
+ GCForwarding::begin ();
211
212
phase2_prepare_compaction ();
212
213
213
214
if (has_compaction_targets ()) {
@@ -220,6 +221,8 @@ void G1FullCollector::collect() {
220
221
log_info (gc, phases) (" No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap" );
221
222
}
222
223
224
+ GCForwarding::end ();
225
+
223
226
phase5_reset_metadata ();
224
227
225
228
G1CollectedHeap::finish_codecache_marking_cycle ();
@@ -331,8 +334,6 @@ void G1FullCollector::phase1_mark_live_objects() {
331
334
void G1FullCollector::phase2_prepare_compaction () {
332
335
GCTraceTime (Info, gc, phases) info (" Phase 2: Prepare compaction" , scope ()->timer ());
333
336
334
- _heap->forwarding ()->clear ();
335
-
336
337
phase2a_determine_worklists ();
337
338
338
339
if (!has_compaction_targets ()) {
@@ -344,10 +345,9 @@ void G1FullCollector::phase2_prepare_compaction() {
344
345
// Try to avoid OOM immediately after Full GC in case there are no free regions
345
346
// left after determining the result locations (i.e. this phase). Prepare to
346
347
// maximally compact the tail regions of the compaction queues serially.
347
- // TODO: Disabled for now because it violates sliding-forwarding assumption.
348
- // if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
349
- // phase2c_prepare_serial_compaction();
350
- // }
348
+ if (!UseCompactObjectHeaders && (scope ()->do_maximal_compaction () || !has_free_compaction_targets)) {
349
+ phase2c_prepare_serial_compaction ();
350
+ }
351
351
}
352
352
353
353
void G1FullCollector::phase2a_determine_worklists () {
@@ -366,61 +366,61 @@ bool G1FullCollector::phase2b_forward_oops() {
366
366
return task.has_free_compaction_targets ();
367
367
}
368
368
369
- // uint G1FullCollector::truncate_parallel_cps() {
370
- // uint lowest_current = (uint)-1;
371
- // for (uint i = 0; i < workers(); i++) {
372
- // G1FullGCCompactionPoint* cp = compaction_point(i);
373
- // if (cp->has_regions()) {
374
- // lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
375
- // }
376
- // }
377
-
378
- // for (uint i = 0; i < workers(); i++) {
379
- // G1FullGCCompactionPoint* cp = compaction_point(i);
380
- // if (cp->has_regions()) {
381
- // cp->remove_at_or_above(lowest_current);
382
- // }
383
- // }
384
- // return lowest_current;
385
- // }
386
-
387
- // void G1FullCollector::phase2c_prepare_serial_compaction() {
388
- // GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
389
- // // At this point, we know that after parallel compaction there will be regions that
390
- // // are partially compacted into. Thus, the last compaction region of all
391
- // // compaction queues still have space in them. We try to re-compact these regions
392
- // // in serial to avoid a premature OOM when the mutator wants to allocate the first
393
- // // eden region after gc.
394
- //
395
- // // For maximum compaction, we need to re-prepare all objects above the lowest
396
- // // region among the current regions for all thread compaction points. It may
397
- // // happen that due to the uneven distribution of objects to parallel threads, holes
398
- // // have been created as threads compact to different target regions between the
399
- // // lowest and the highest region in the tails of the compaction points.
400
- //
401
- // uint start_serial = truncate_parallel_cps();
402
- // assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");
403
- //
404
- // G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
405
- // assert(!serial_cp->is_initialized(), "sanity!");
406
- //
407
- // HeapRegion* start_hr = _heap->region_at(start_serial);
408
- // serial_cp->add(start_hr);
409
- // serial_cp->initialize(start_hr);
410
- //
411
- // HeapWord* dense_prefix_top = compaction_top(start_hr);
412
- // G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);
413
- //
414
- // for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
415
- // if (is_compaction_target(i)) {
416
- // HeapRegion* current = _heap->region_at(i);
417
- // set_compaction_top(current, current->bottom());
418
- // serial_cp->add(current);
419
- // current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
420
- // }
421
- // }
422
- // serial_cp->update();
423
- // }
369
+ uint G1FullCollector::truncate_parallel_cps () {
370
+ uint lowest_current = (uint)-1 ;
371
+ for (uint i = 0 ; i < workers (); i++) {
372
+ G1FullGCCompactionPoint* cp = compaction_point (i);
373
+ if (cp->has_regions ()) {
374
+ lowest_current = MIN2 (lowest_current, cp->current_region ()->hrm_index ());
375
+ }
376
+ }
377
+
378
+ for (uint i = 0 ; i < workers (); i++) {
379
+ G1FullGCCompactionPoint* cp = compaction_point (i);
380
+ if (cp->has_regions ()) {
381
+ cp->remove_at_or_above (lowest_current);
382
+ }
383
+ }
384
+ return lowest_current;
385
+ }
386
+
387
// Re-plan the tails of the parallel compaction queues as one serial queue so
// the heap ends maximally compacted and the mutator can allocate immediately
// after the full GC.
void G1FullCollector::phase2c_prepare_serial_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point, we know that after parallel compaction there will be regions that
  // are partially compacted into. Thus, the last compaction region of all
  // compaction queues still have space in them. We try to re-compact these regions
  // in serial to avoid a premature OOM when the mutator wants to allocate the first
  // eden region after gc.

  // For maximum compaction, we need to re-prepare all objects above the lowest
  // region among the current regions for all thread compaction points. It may
  // happen that due to the uneven distribution of objects to parallel threads, holes
  // have been created as threads compact to different target regions between the
  // lowest and the highest region in the tails of the compaction points.

  // Truncate the parallel queues back to the lowest partially-filled region;
  // everything at or above it is re-planned serially below.
  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  // Seed the serial compaction point with the lowest region; compaction into
  // it resumes from its current compaction top.
  HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  // Objects below the current compaction top of the start region stay where
  // they are; re-preparation begins at that watermark.
  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);

  // Walk every higher compaction-target region, reset its compaction top to
  // bottom, append it to the serial queue, and recompute forwarding for its
  // marked objects via the re-prepare closure.
  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
    if (is_compaction_target(i)) {
      HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}
424
424
425
425
void G1FullCollector::phase3_adjust_pointers () {
426
426
// Adjust the pointers to reflect the new locations
0 commit comments