|
30 | 30 | #include "gc/g1/g1EvacFailureRegions.hpp" |
31 | 31 | #include "gc/g1/g1HeapVerifier.hpp" |
32 | 32 | #include "gc/g1/g1OopClosures.inline.hpp" |
| 33 | +#include "gc/g1/g1RedirtyCardsQueue.hpp" |
33 | 34 | #include "gc/g1/heapRegion.hpp" |
34 | 35 | #include "gc/g1/heapRegionRemSet.inline.hpp" |
35 | 36 | #include "gc/shared/preservedMarks.inline.hpp" |
36 | 37 | #include "oops/access.inline.hpp" |
37 | 38 | #include "oops/compressedOops.inline.hpp" |
38 | 39 | #include "oops/oop.inline.hpp" |
39 | 40 |
|
| 41 | +class UpdateLogBuffersDeferred : public BasicOopIterateClosure { |
| 42 | +private: |
| 43 | + G1CollectedHeap* _g1h; |
| 44 | + G1RedirtyCardsLocalQueueSet* _rdc_local_qset; |
| 45 | + G1CardTable* _ct; |
| 46 | + |
| 47 | + // Remember the last enqueued card to avoid enqueuing the same card over and over; |
| 48 | + // since we only ever handle a card once, this is sufficient. |
| 49 | + size_t _last_enqueued_card; |
| 50 | + |
| 51 | +public: |
| 52 | + UpdateLogBuffersDeferred(G1RedirtyCardsLocalQueueSet* rdc_local_qset) : |
| 53 | + _g1h(G1CollectedHeap::heap()), |
| 54 | + _rdc_local_qset(rdc_local_qset), |
| 55 | + _ct(_g1h->card_table()), |
| 56 | + _last_enqueued_card(SIZE_MAX) {} |
| 57 | + |
| 58 | + virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
| 59 | + virtual void do_oop( oop* p) { do_oop_work(p); } |
| 60 | + template <class T> void do_oop_work(T* p) { |
| 61 | + assert(_g1h->heap_region_containing(p)->is_in_reserved(p), "paranoia"); |
| 62 | + assert(!_g1h->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region"); |
| 63 | + |
| 64 | + T const o = RawAccess<>::oop_load(p); |
| 65 | + if (CompressedOops::is_null(o)) { |
| 66 | + return; |
| 67 | + } |
| 68 | + |
| 69 | + if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) { |
| 70 | + return; |
| 71 | + } |
| 72 | + size_t card_index = _ct->index_for(p); |
| 73 | + if (card_index != _last_enqueued_card) { |
| 74 | + _rdc_local_qset->enqueue(_ct->byte_for_index(card_index)); |
| 75 | + _last_enqueued_card = card_index; |
| 76 | + } |
| 77 | + } |
| 78 | +}; |
| 79 | + |
40 | 80 | class RemoveSelfForwardPtrObjClosure: public ObjectClosure { |
41 | 81 | G1CollectedHeap* _g1h; |
42 | 82 | G1ConcurrentMark* _cm; |
43 | 83 | HeapRegion* _hr; |
44 | 84 | size_t _marked_bytes; |
| 85 | + UpdateLogBuffersDeferred* _log_buffer_cl; |
45 | 86 | bool _during_concurrent_start; |
46 | 87 | uint _worker_id; |
47 | 88 | HeapWord* _last_forwarded_object_end; |
48 | 89 |
|
49 | 90 | public: |
  // hr: the evacuation-failed region being walked.
  // log_buffer_cl: deferred remembered-set-update closure, applied to each
  //   live object so entries for cards skipped during the evacuation pause
  //   can be recreated.
  // during_concurrent_start: whether this pause is a concurrent-start pause
  //   (affects marking of live objects elsewhere in this closure).
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 UpdateLogBuffersDeferred* log_buffer_cl,
                                 bool during_concurrent_start,
                                 uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _log_buffer_cl(log_buffer_cl),
    _during_concurrent_start(during_concurrent_start),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }
@@ -98,6 +141,20 @@ class RemoveSelfForwardPtrObjClosure: public ObjectClosure { |
98 | 141 | _marked_bytes += (obj_size * HeapWordSize); |
99 | 142 | PreservedMarks::init_forwarded_mark(obj); |
100 | 143 |
|
| 144 | + // While we were processing RSet buffers during the collection, |
| 145 | + // we actually didn't scan any cards on the collection set, |
| 146 | + // since we didn't want to update remembered sets with entries |
| 147 | + // that point into the collection set, given that live objects |
| 148 | + // from the collection set are about to move and such entries |
| 149 | + // will be stale very soon. |
| 150 | + // This change also dealt with a reliability issue which |
| 151 | + // involved scanning a card in the collection set and coming |
| 152 | + // across an array that was being chunked and looking malformed. |
| 153 | + // The problem is that, if evacuation fails, we might have |
| 154 | + // remembered set entries missing given that we skipped cards on |
| 155 | + // the collection set. So, we'll recreate such entries now. |
| 156 | + obj->oop_iterate(_log_buffer_cl); |
| 157 | + |
101 | 158 | HeapWord* obj_end = obj_addr + obj_size; |
102 | 159 | _last_forwarded_object_end = obj_end; |
103 | 160 | _hr->alloc_block_in_bot(obj_addr, obj_end); |
@@ -146,22 +203,33 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure { |
146 | 203 | G1CollectedHeap* _g1h; |
147 | 204 | uint _worker_id; |
148 | 205 |
|
| 206 | + G1RedirtyCardsLocalQueueSet _rdc_local_qset; |
| 207 | + UpdateLogBuffersDeferred _log_buffer_cl; |
| 208 | + |
149 | 209 | uint volatile* _num_failed_regions; |
150 | 210 | G1EvacFailureRegions* _evac_failure_regions; |
151 | 211 |
|
152 | 212 | public: |
  // rdcqs: the shared redirty-cards queue set; this closure layers a
  //   worker-local queue set (_rdc_local_qset) on top of it, which the
  //   embedded _log_buffer_cl enqueues into.
  RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs,
                                uint worker_id,
                                uint volatile* num_failed_regions,
                                G1EvacFailureRegions* evac_failure_regions) :
    _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id),
    _rdc_local_qset(rdcqs),
    _log_buffer_cl(&_rdc_local_qset),
    _num_failed_regions(num_failed_regions),
    _evac_failure_regions(evac_failure_regions) {
  }
161 | 224 |
|
  ~RemoveSelfForwardPtrHRClosure() {
    // Flush any cards still buffered in the worker-local queue set
    // (presumably into the shared set it was built on — see constructor)
    // before this closure goes away.
    _rdc_local_qset.flush();
  }
| 228 | + |
162 | 229 | size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr, |
163 | 230 | bool during_concurrent_start) { |
164 | 231 | RemoveSelfForwardPtrObjClosure rspc(hr, |
| 232 | + &_log_buffer_cl, |
165 | 233 | during_concurrent_start, |
166 | 234 | _worker_id); |
167 | 235 | hr->object_iterate(&rspc); |
@@ -200,15 +268,17 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure { |
200 | 268 | } |
201 | 269 | }; |
202 | 270 |
|
// Parallel task that removes self-forwarding pointers from regions that
// failed evacuation.  rdcqs is the redirty-cards queue set handed to each
// per-worker closure so remembered-set entries can be recreated.
G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs,
                                                               G1EvacFailureRegions* evac_failure_regions) :
  AbstractGangTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _rdcqs(rdcqs),
  _hrclaimer(_g1h->workers()->active_workers()),
  _evac_failure_regions(evac_failure_regions),
  _num_failed_regions(0) { }
209 | 279 |
|
// Per-worker entry point: each worker gets its own closure (and thus its
// own worker-local redirty queue set, flushed when the closure dies).
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
  RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id, &_num_failed_regions, _evac_failure_regions);

  // Iterate through all regions that failed evacuation during the entire collection.
  _evac_failure_regions->par_iterate(&rsfp_cl, &_hrclaimer, worker_id);
|
0 commit comments