diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 6cbe44fef09fb..60ee92190b33c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -270,6 +270,7 @@ class ShenandoahHeap : public CollectedHeap { public: inline HeapWord* base() const { return _heap_region.start(); } + inline HeapWord* end() const { return _heap_region.end(); } inline size_t num_regions() const { return _num_regions; } inline bool is_heap_region_special() { return _heap_region_special; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp index 34e6af41b427c..5318f38d8ef67 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp @@ -57,6 +57,26 @@ bool ShenandoahMarkBitMap::is_bitmap_clear_range(const HeapWord* start, const He return (result == end); } +HeapWord* ShenandoahMarkBitMap::get_prev_marked_addr(const HeapWord* limit, + const HeapWord* addr) const { +#ifdef ASSERT + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapRegion* r = heap->heap_region_containing(addr); + ShenandoahMarkingContext* ctx = heap->marking_context(); + HeapWord* tams = ctx->top_at_mark_start(r); + assert(limit != nullptr, "limit must not be null"); + assert(limit >= r->bottom(), "limit must be at or above bottom"); + assert(addr <= tams, "addr must not be above TAMS"); +#endif + + // Round addr down to a possible object boundary to be safe. + size_t const addr_offset = address_to_index(align_down(addr, HeapWordSize << LogMinObjAlignment)); + size_t const limit_offset = address_to_index(limit); + size_t const last_offset = get_prev_one_offset(limit_offset, addr_offset); + + // Cast required to remove const-ness of the value pointed to. We won't modify that object, but our caller might. + return (last_offset > addr_offset) ? (HeapWord*) addr + 1 : index_to_address(last_offset); +} HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp index c094ec434f5d7..73bf3ecbeea9b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp @@ -119,9 +119,21 @@ class ShenandoahMarkBitMap { template<bm_word_t flip, bool aligned_right> inline idx_t get_next_bit_impl(idx_t l_index, idx_t r_index) const; - inline idx_t get_next_one_offset (idx_t l_index, idx_t r_index) const; + // Helper for get_prev_{zero,one}_bit variants. + // - flip designates whether searching for 1s or 0s. Must be one of + // find_{zeros,ones}_flip. + // - aligned_left is true if l_index is a priori on a bm_word_t boundary. + template<bm_word_t flip, bool aligned_left> + inline idx_t get_prev_bit_impl(idx_t l_index, idx_t r_index) const; + + // Search for the first marked address in the range [l_index, r_index), or r_index if none found. + inline idx_t get_next_one_offset(idx_t l_index, idx_t r_index) const; - void clear_large_range (idx_t beg, idx_t end); + // Search for the last set bit in the range [l_index, r_index]. Return r_index + 2 if none found. + inline idx_t get_prev_one_offset(idx_t l_index, idx_t r_index) const; + + // Clear the strong and weak mark bits for all index positions >= beg and < end. + void clear_large_range(idx_t beg, idx_t end); // Verify bit is less than size().
void verify_index(idx_t bit) const NOT_DEBUG_RETURN; @@ -162,12 +174,14 @@ class ShenandoahMarkBitMap { bool is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const; - // Return the address corresponding to the next marked bit at or after - // "addr", and before "limit", if "limit" is non-null. If there is no - // such bit, returns "limit" if that is non-null, or else "endWord()". + // Return the first marked address in the range [addr, limit), or limit if none found. HeapWord* get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const; + // Return the last marked address in the range [limit, addr], or addr+1 if none found. + HeapWord* get_prev_marked_addr(const HeapWord* limit, + const HeapWord* addr) const; + bm_word_t inverted_bit_mask_for_range(idx_t beg, idx_t end) const; void clear_range_within_word (idx_t beg, idx_t end); void clear_range (idx_t beg, idx_t end); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp index 3bea8d73959e1..ae56db810bbfb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp @@ -29,6 +29,7 @@ #include "gc/shenandoah/shenandoahMarkBitMap.hpp" #include "runtime/atomicAccess.hpp" +#include "utilities/count_leading_zeros.hpp" #include "utilities/count_trailing_zeros.hpp" inline size_t ShenandoahMarkBitMap::address_to_index(const HeapWord* addr) const { @@ -169,10 +170,99 @@ inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_bit_impl(idx_t return r_index; } +template<ShenandoahMarkBitMap::bm_word_t flip, bool aligned_left> +inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_prev_bit_impl(idx_t l_index, idx_t r_index) const { + STATIC_ASSERT(flip == find_ones_flip || flip == find_zeros_flip); + verify_range(l_index, r_index); + assert(!aligned_left || is_aligned(l_index, BitsPerWord), "l_index not aligned"); + + // The first word often contains an interesting bit, either due to + // density or because of features of the calling algorithm. So it's + // important to examine that first word with a minimum of fuss, + // minimizing setup time for later words that will be wasted if the + // first word is indeed interesting. + + // The benefit from aligned_left being true is relatively small. + // It saves an operation in the setup for the word search loop. + // It also eliminates the range check on the final result. + // However, callers often have a comparison with l_index, and + // inlining often allows the two comparisons to be combined; it is + // important when !aligned_left that return paths either return + // l_index or a value dominating a comparison with l_index. + // aligned_left is still helpful when the caller doesn't have a + // range check because features of the calling algorithm guarantee + // an interesting bit will be present. + + if (l_index < r_index) { + // Get the word containing r_index, and shift out the high-order bits (representing objects that come after r_index) + idx_t index = to_words_align_down(r_index); + assert(BitsPerWord - 2 >= bit_in_word(r_index), "sanity"); + size_t shift = BitsPerWord - 2 - bit_in_word(r_index); + bm_word_t cword = (map(index) ^ flip) << shift; + // After this shift, the highest order bits correspond to r_index. + + // We give special handling if either of the two most significant bits (Weak or Strong) is set. With 64-bit + // words, the mask of interest is 0xc000_0000_0000_0000.
Symbolically, this constant is represented by: + const bm_word_t first_object_mask = ((bm_word_t) 0x3) << (BitsPerWord - 2); + if ((cword & first_object_mask) != 0) { + // The first object is similarly often interesting. When it matters + // (density or features of the calling algorithm make it likely + // the first bit is set), going straight to the next clause compares + // poorly with doing this check first; count_leading_zeros can be + // relatively expensive, plus there is the additional range check. + // But when the first bit isn't set, the cost of having tested for + // it is relatively small compared to the rest of the search. + return r_index; + } else if (cword != 0) { + // Note that there are 2 bits corresponding to every index value (Weak and Strong), and every odd index value + // corresponds to the same object as index-1. + // Flipped and shifted first word is non-zero. If leading_zeros is 0 or 1, we return r_index (above). + // If leading_zeros is 2 or 3, we return (r_index - 1) or (r_index - 2), and so forth. + idx_t result = r_index + 1 - count_leading_zeros(cword); + if (aligned_left || (result >= l_index)) return result; + else { + // Sentinel value means no object found within specified range. + return r_index + 2; + } + } else { + // Flipped and shifted first word is zero. Word search downward + // through the word containing l_index for a non-zero flipped word. + // The word containing l_index must itself be searched; when !aligned_left, + // the range check below rejects any bit that precedes l_index. + idx_t limit = to_words_align_down(l_index); + // Unsigned index is always >= unsigned limit if limit equals zero, so test for strictly greater than before decrement. + while (index-- > limit) { + cword = map(index) ^ flip; + if (cword != 0) { + // cword holds bits: + // 0x03 for the object corresponding to index (and index+1) (count_leading_zeros is 62 or 63) + // 0x0c for the object corresponding to index + 2 (and index+3) (count_leading_zeros is 60 or 61) + // and so on. + idx_t result = bit_index(index + 1) - (count_leading_zeros(cword) + 1); + if (aligned_left || (result >= l_index)) return result; + else { + // Sentinel value means no object found within specified range. + return r_index + 2; + } + } + } + // No bits in range; return r_index+2. + return r_index + 2; + } + } + else { + return r_index + 2; + } +} + inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_one_offset(idx_t l_offset, idx_t r_offset) const { return get_next_bit_impl<find_ones_flip, false>(l_offset, r_offset); } +inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_prev_one_offset(idx_t l_offset, idx_t r_offset) const { + return get_prev_bit_impl<find_ones_flip, false>(l_offset, r_offset); +} + // Returns a bit mask for a range of bits [beg, end) within a single word. Each // bit in the mask is 0 if the bit is in the range, 1 if not in the range.
The // returned mask can be used directly to clear the range, or inverted to set the diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp index 8a52042e513ef..d8e0c74ea4e94 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp @@ -67,8 +67,12 @@ class ShenandoahMarkingContext : public CHeapObj<mtGC> { inline bool is_marked_or_old(oop obj) const; inline bool is_marked_strong_or_old(oop obj) const; + // Return the first marked address in the range [addr, limit), or limit if no marked object is found inline HeapWord* get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const; + // Return the address of the last marked object in the range [limit, start], or start+1 if no marked object is found + inline HeapWord* get_prev_marked_addr(const HeapWord* limit, const HeapWord* start) const; + inline bool allocated_after_mark_start(const oop obj) const; inline bool allocated_after_mark_start(const HeapWord* addr) const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp index bff4afc9ce9d0..637dbf47c3f41 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp @@ -72,6 +72,10 @@ inline HeapWord* ShenandoahMarkingContext::get_next_marked_addr(const HeapWord* return _mark_bit_map.get_next_marked_addr(start, limit); } +inline HeapWord* ShenandoahMarkingContext::get_prev_marked_addr(const HeapWord* limit, const HeapWord* start) const { + return _mark_bit_map.get_prev_marked_addr(limit, start); +} + inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const { const HeapWord* addr = cast_from_oop<HeapWord*>(obj); return allocated_after_mark_start(addr); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp index af6cd6d39ab21..3a99023eca42a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp @@ -137,9 +137,8 @@ void ShenandoahDirectCardMarkRememberedSet::mark_write_table_as_clean() { // No lock required because arguments align with card boundaries. void ShenandoahCardCluster::reset_object_range(HeapWord* from, HeapWord* to) { - assert(((((unsigned long long) from) & (CardTable::card_size() - 1)) == 0) && - ((((unsigned long long) to) & (CardTable::card_size() - 1)) == 0), - "reset_object_range bounds must align with card boundaries"); + assert(CardTable::is_card_aligned(from) && CardTable::is_card_aligned(to), + "Must align with card boundaries"); size_t card_at_start = _rs->card_index_for_addr(from); size_t num_cards = (to - from) / CardTable::card_size_in_words(); @@ -234,31 +233,86 @@ size_t ShenandoahCardCluster::get_last_start(size_t card_index) const { return _object_starts[card_index].offsets.last; } -// Given a card_index, return the starting address of the first block in the heap -// that straddles into this card. If this card is co-initial with an object, then -// this would return the first address of the range that this card covers, which is -// where the card's first object also begins.
-HeapWord* ShenandoahCardCluster::block_start(const size_t card_index) const { +HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, const ShenandoahMarkingContext* const ctx, + HeapWord* tams, HeapWord* end_range_of_interest) const { HeapWord* left = _rs->addr_for_card_index(card_index); + assert(left < end_range_of_interest, "No meaningful work to do"); + ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left); #ifdef ASSERT assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode"); - ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left); assert(region->is_old(), "Do not use for young regions"); // For HumongousRegion:s it's more efficient to jump directly to the // start region. assert(!region->is_humongous(), "Use region->humongous_start_region() instead"); #endif + + HeapWord* right = MIN2(region->top(), end_range_of_interest); + HeapWord* end_of_search_next = MIN2(right, tams); + size_t last_relevant_card_index; + if (end_range_of_interest == _end_of_heap) { + last_relevant_card_index = _rs->card_index_for_addr(end_range_of_interest - 1); + } else { + last_relevant_card_index = _rs->card_index_for_addr(end_range_of_interest); + if (_rs->addr_for_card_index(last_relevant_card_index) == end_range_of_interest) { + last_relevant_card_index--; + } + } + assert(card_index <= last_relevant_card_index, "sanity: card_index: %zu, last_relevant: %zu, left: " PTR_FORMAT + ", end_of_range: " PTR_FORMAT, card_index, last_relevant_card_index, p2i(left), p2i(end_range_of_interest)); + + // If the marking context is valid and we are below tams, we use the marking bit map to find the first marked object that + // intersects with this card. If no such object exists, we return the first marked object that follows the start + // of this card's memory range if such an object is found at or before last_relevant_card_index. If there are no + // marked objects in this range, we return nullptr. + if ((ctx != nullptr) && (left < tams)) { + if (ctx->is_marked(left)) { + oop obj = cast_to_oop(left); + assert(oopDesc::is_oop(obj), "Should be an object"); + return left; + } + // Get the previous marked object, if any + if (region->bottom() < left) { + // In the case that this region was most recently marked as young, the fact that this region has been promoted in place + // denotes that final mark (Young) has completed. In the case that this region was most recently marked as old, the + // fact that (ctx != nullptr) denotes that old marking has completed. Otherwise, ctx would equal null. + HeapWord* prev = ctx->get_prev_marked_addr(region->bottom(), left - 1); + if (prev < left) { + oop obj = cast_to_oop(prev); + assert(oopDesc::is_oop(obj), "Should be an object"); + HeapWord* obj_end = prev + obj->size(); + if (obj_end > left) { + return prev; + } + } + } + // Either prev >= left (no previous object found), or the previous object that was found ends before this card's range begins.
+ // In either case, find the next marked object, if any, on this or a following card + assert(!ctx->is_marked(left), "Was dealt with above"); + assert(right > left, "We don't expect to be examining cards above the smaller of TAMS or top"); + HeapWord* next = ctx->get_next_marked_addr(left, end_of_search_next); + // If end_of_search_next < right, we may return tams here, which is "marked" by default + if (next < right) { + oop obj = cast_to_oop(next); + assert(oopDesc::is_oop(obj), "Should be an object"); + return next; + } else { + return nullptr; + } + } + + assert((ctx == nullptr) || (left >= tams), "Should have returned above"); + + // The following code assumes that all data in region at or above left holds parsable objects + assert((left >= tams) || ShenandoahGenerationalHeap::heap()->old_generation()->is_parsable(), + "The code that follows expects a parsable heap"); if (starts_object(card_index) && get_first_start(card_index) == 0) { - // This card contains a co-initial object; a fortiori, it covers - // also the case of a card being the first in a region. + // This card contains a co-initial object; a fortiori, it covers also the case of a card being the first in a region. assert(oopDesc::is_oop(cast_to_oop(left)), "Should be an object"); return left; } - HeapWord* p = nullptr; - oop obj = cast_to_oop(p); ssize_t cur_index = (ssize_t)card_index; assert(cur_index >= 0, "Overflow"); assert(cur_index > 0, "Should have returned above"); @@ -268,37 +322,80 @@ HeapWord* ShenandoahCardCluster::block_start(const size_t card_index) const { } // cur_index should start an object: we should not have walked // past the left end of the region. - assert(cur_index >= 0 && (cur_index <= (ssize_t)card_index), "Error"); + assert(cur_index >= 0 && (cur_index <= (ssize_t) card_index), "Error"); assert(region->bottom() <= _rs->addr_for_card_index(cur_index), "Fell off the bottom of containing region"); assert(starts_object(cur_index), "Error"); size_t offset = get_last_start(cur_index); // can avoid call via card size arithmetic below instead - p = _rs->addr_for_card_index(cur_index) + offset; + HeapWord* p = _rs->addr_for_card_index(cur_index) + offset; + if ((ctx != nullptr) && (p < tams)) { + if (ctx->is_marked(p)) { + oop obj = cast_to_oop(p); + assert(oopDesc::is_oop(obj), "Should be an object"); + assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr"); + assert(p + obj->size() > left, "This object should span start of card"); + assert(p < right, "Result must precede right"); + return p; + } else { + // Object that spans start of card is dead, so should not be scanned + assert((ctx == nullptr) || (left + get_first_start(card_index) >= tams), "Should have handled this case above"); + if (starts_object(card_index)) { + assert(left + get_first_start(card_index) < right, "Result must precede right"); + return left + get_first_start(card_index); + } else { + // Spanning object is dead and this card does not start an object, so the start object is in some card that follows + size_t following_card_index = card_index; + do { + following_card_index++; + if (following_card_index > last_relevant_card_index) { + return nullptr; + } + } while (!starts_object(following_card_index)); + assert(_rs->addr_for_card_index(following_card_index) + get_first_start(following_card_index) < right, + "Result must precede right"); + return _rs->addr_for_card_index(following_card_index) + get_first_start(following_card_index); + } + } + } + // Recall that we already dealt with the co-initial object case above assert(p < left,
"obj should start before left"); // While it is safe to ask an object its size in the loop that // follows, the (ifdef'd out) loop should never be needed. - // 1. we ask this question only for regions in the old generation + // 1. we ask this question only for regions in the old generation, and those + // that are not humongous regions // 2. there is no direct allocation ever by mutators in old generation - // regions. Only GC will ever allocate in old regions, and then - // too only during promotion/evacuation phases. Thus there is no danger + // regions walked by this code. Only GC will ever allocate in old regions, + // and then too only during promotion/evacuation phases. Thus there is no danger // of races between reading from and writing to the object start array, // or of asking partially initialized objects their size (in the loop below). + // Furthermore, humongous regions (and their dirty cards) are never processed + // by this code. // 3. only GC asks this question during phases when it is not concurrently // evacuating/promoting, viz. during concurrent root scanning (before // the evacuation phase) and during concurrent update refs (after the // evacuation phase) of young collections. This is never called - // during old or global collections. + // during global collections during marking or update refs.. // 4. Every allocation under TAMS updates the object start array. - NOT_PRODUCT(obj = cast_to_oop(p);) + oop obj = cast_to_oop(p); assert(oopDesc::is_oop(obj), "Should be an object"); +#ifdef ASSERT +#define WALK_FORWARD_IN_BLOCK_START true +#else #define WALK_FORWARD_IN_BLOCK_START false +#endif // ASSERT while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) { p += obj->size(); + obj = cast_to_oop(p); + assert(oopDesc::is_oop(obj), "Should be an object"); + assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr"); + // Check assumptions in previous block comment if this assert fires + guarantee(false, "Should never need forward walk in block start"); } -#undef WALK_FORWARD_IN_BLOCK_START // false - assert(p + obj->size() > left, "obj should end after left"); +#undef WALK_FORWARD_IN_BLOCK_START + assert(p <= left, "p should start at or before left end of card"); + assert(p + obj->size() > left, "obj should end after left end of card"); return p; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp index d04a768530f60..9a0d28d2cb754 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp @@ -353,6 +353,7 @@ class ShenandoahCardCluster: public CHeapObj { private: ShenandoahDirectCardMarkRememberedSet* _rs; + const HeapWord* _end_of_heap; public: static const size_t CardsPerCluster = 64; @@ -406,6 +407,7 @@ class ShenandoahCardCluster: public CHeapObj { ShenandoahCardCluster(ShenandoahDirectCardMarkRememberedSet* rs) { _rs = rs; + _end_of_heap = ShenandoahHeap::heap()->end(); _object_starts = NEW_C_HEAP_ARRAY(crossing_info, rs->total_cards() + 1, mtGC); // the +1 is to account for card table guarding entry for (size_t i = 0; i < rs->total_cards(); i++) { _object_starts[i].short_word = 0; @@ -652,12 +654,31 @@ class ShenandoahCardCluster: public CHeapObj { size_t get_last_start(size_t card_index) const; - // Given a card_index, return the starting address of the first block in the heap - // that straddles into the card. 
If the card is co-initial with an object, then - // this would return the starting address of the heap that this card covers. - // Expects to be called for a card affiliated with the old generation in - // generational mode. - HeapWord* block_start(size_t card_index) const; + // Given a card_index, return the starting address of the first live object in the heap + // that intersects with or follows this card. This must be a valid, parsable object, and must + // be the first such object that intersects with this card. The object may start before, + // at, or after the start of the card identified by card_index, and may end in or after the card. + // + // The tams argument represents top for the enclosing region at the start of the most recently + // initiated concurrent old marking effort. If ctx is non-null, we use the marking context to identify + // marked objects below tams. Above tams, we know that every object is marked and that the memory is + // parsable (so we can add an object's size to its address to find the next object). If ctx is null, + // we use crossing maps to find where objects start, and use object sizes to walk individual objects. + // The region must be parsable if ctx is null. + // + // The end_range_of_interest pointer argument represents an upper bound on how far we look in the forward direction + // for the first object in the heap that intersects or follows this card. If there are no live objects found at + // an address less than end_range_of_interest, nullptr is returned. + // + // Expects to be called for a card in a region affiliated with the old generation of the + // generational heap, otherwise behavior is undefined. + // + // If not null, ctx holds the complete marking context of the old generation. If null, + // we expect that the marking context isn't available and the crossing maps are valid. + // Note that crossing maps may be invalid following class unloading and before dead + // or unloaded objects have been coalesced and filled. Coalesce and fill updates the crossing maps. + HeapWord* first_object_start(size_t card_index, const ShenandoahMarkingContext* const ctx, + HeapWord* tams, HeapWord* end_range_of_interest) const; }; // ShenandoahScanRemembered is a concrete class representing the diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp index 919cc4f6fd796..e394daa68c0d8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp @@ -50,7 +50,7 @@ // degenerated execution, leading to dangling references. template <typename ClosureType> void ShenandoahScanRemembered::process_clusters(size_t first_cluster, size_t count, HeapWord* end_of_range, - ClosureType* cl, bool use_write_table, uint worker_id) { + ClosureType* cl, bool use_write_table, uint worker_id) { assert(ShenandoahHeap::heap()->old_generation()->is_parsable(), "Old generation regions must be parsable for remembered set scan"); // If old-gen evacuation is active, then MarkingContext for old-gen heap regions is valid. We use the MarkingContext @@ -102,7 +102,7 @@ void ShenandoahScanRemembered::process_clusters(size_t first_cluster, size_t cou // tams and ctx below are for old generation marking. As such, young gen roots must // consider everything above tams, since it doesn't represent a TAMS for young gen's // SATB marking. - const HeapWord* tams = (ctx == nullptr ?
region->bottom() : ctx->top_at_mark_start(region)); + HeapWord* const tams = (ctx == nullptr ? region->bottom() : ctx->top_at_mark_start(region)); NOT_PRODUCT(ShenandoahCardStats stats(whole_cards, card_stats(worker_id));) @@ -162,19 +162,37 @@ void ShenandoahScanRemembered::process_clusters(size_t first_cluster, size_t cou // [left, right) is a maximal right-open interval of dirty cards HeapWord* left = _rs->addr_for_card_index(dirty_l); // inclusive HeapWord* right = _rs->addr_for_card_index(dirty_r + 1); // exclusive + if (end_addr <= left) { + // The range of addresses to be scanned is empty + continue; + } // Clip right to end_addr established above (still exclusive) right = MIN2(right, end_addr); assert(right <= region->top() && end_addr <= region->top(), "Busted bounds"); const MemRegion mr(left, right); - // NOTE: We'll not call block_start() repeatedly - // on a very large object if its head card is dirty. If not, - // (i.e. the head card is clean) we'll call it each time we - // process a new dirty range on the object. This is always - // the case for large object arrays, which are typically more + // NOTE: We'll not call first_object_start() repeatedly + // on a very large object, i.e. one spanning multiple cards, + // if its head card is dirty. If not, (i.e. its head card is clean) + // we'll call it each time we process a new dirty range on the object. + // This is always the case for large object arrays, which are typically more // common. - HeapWord* p = _scc->block_start(dirty_l); + assert(ctx != nullptr || heap->old_generation()->is_parsable(), "Error"); + HeapWord* p = _scc->first_object_start(dirty_l, ctx, tams, right); + assert((p == nullptr) || (p < right), "No first object found is denoted by nullptr, p: " + PTR_FORMAT ", right: " PTR_FORMAT ", end_addr: " PTR_FORMAT ", next card addr: " PTR_FORMAT, + p2i(p), p2i(right), p2i(end_addr), p2i(_rs->addr_for_card_index(dirty_r + 1))); + if (p == nullptr) { + // There are no live objects to be scanned in this dirty range. cur_index identifies first card in this + // uninteresting dirty range. At the top of the next loop iteration, we will either end the loop + // (because cur_index < start_card_index) or we will begin the search for a range of clean cards. + continue; + } + oop obj = cast_to_oop(p); + assert(oopDesc::is_oop(obj), "Not an object at " PTR_FORMAT ", left: " PTR_FORMAT ", right: " PTR_FORMAT, + p2i(p), p2i(left), p2i(right)); + assert(ctx == nullptr || ctx->is_marked(obj), "Error"); // PREFIX: The object that straddles into this range of dirty cards // from the left may be subject to special treatment unless diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahMarkBitMap.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahMarkBitMap.cpp new file mode 100644 index 0000000000000..3dbb7c621226a --- /dev/null +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahMarkBitMap.cpp @@ -0,0 +1,571 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahMarkBitMap.hpp" +#include "gc/shenandoah/shenandoahMarkBitMap.inline.hpp" + +BEGIN_ALLOW_FORBIDDEN_FUNCTIONS +#include <iostream> +END_ALLOW_FORBIDDEN_FUNCTIONS + +#include "memory/memRegion.hpp" +#include "unittest.hpp" + +#include "utilities/ostream.hpp" +#include "utilities/vmassert_reinstall.hpp" +#include "utilities/vmassert_uninstall.hpp" + +// These tests will all be skipped (unless Shenandoah becomes the default +// collector). To execute these tests, you must enable Shenandoah, which +// is done with: +// +// % make exploded-test TEST="gtest:ShenandoahMarkBitMap*" CONF=release TEST_OPTS="JAVA_OPTIONS=-XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCMode=generational" +// +// Please note that these 'unit' tests are really integration tests and rely +// on the JVM being initialized. These tests manipulate the state of the +// collector in ways that are not compatible with a normal collection run. +// If these tests take longer than the minimum time between gc intervals - +// or, more likely, if you have them paused in a debugger longer than this +// interval - you can expect trouble. These tests will also not run in a build +// with asserts enabled because they use APIs that expect to run at a safepoint.
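+ +// The checks below lean on the sentinel conventions documented in shenandoahMarkBitMap.hpp: +// get_next_marked_addr(addr, limit) returns limit when nothing is marked in [addr, limit), +// while get_prev_marked_addr(limit, addr) returns addr + 1 when nothing is marked in [limit, addr].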
+ +#ifdef ASSERT +#define SKIP_IF_NOT_SHENANDOAH() \ + std::cout << "skipped (debug build)\n"; \ + return; +#else +#define SKIP_IF_NOT_SHENANDOAH() \ + if (!UseShenandoahGC) { \ + std::cout << "skipped\n"; \ + return; \ + } +#endif + +static bool _success; +static size_t _assertion_failures; + +#define MarkBitMapAssertEqual(a, b) EXPECT_EQ((a), (b)); if ((a) != (b)) { _assertion_failures++; } +#define MarkBitMapAssertTrue(a) EXPECT_TRUE((a)); if ((a) == 0) { _assertion_failures++; } + + +class ShenandoahMarkBitMapTest: public ::testing::Test { +protected: + + static void verify_bitmap_is_empty(HeapWord *start, size_t words_in_heap, ShenandoahMarkBitMap* mbm) { + MarkBitMapAssertTrue(mbm->is_bitmap_clear_range(start, start + words_in_heap)); + while (words_in_heap-- > 0) { + MarkBitMapAssertTrue(!mbm->is_marked(start)); + MarkBitMapAssertTrue(!mbm->is_marked_weak(start)); + MarkBitMapAssertTrue(!mbm->is_marked_strong(start)); + start++; + } + } + + static void verify_bitmap_is_weakly_marked(ShenandoahMarkBitMap* mbm, + HeapWord* weakly_marked_addresses[], size_t weakly_marked_objects) { + for (size_t i = 0; i < weakly_marked_objects; i++) { + HeapWord* obj_addr = weakly_marked_addresses[i]; + MarkBitMapAssertTrue(mbm->is_marked(obj_addr)); + MarkBitMapAssertTrue(mbm->is_marked_weak(obj_addr)); + } + } + + static void verify_bitmap_is_strongly_marked(ShenandoahMarkBitMap* mbm, + HeapWord* strongly_marked_addresses[], size_t strongly_marked_objects) { + for (size_t i = 0; i < strongly_marked_objects; i++) { + HeapWord* obj_addr = strongly_marked_addresses[i]; + MarkBitMapAssertTrue(mbm->is_marked(obj_addr)); + MarkBitMapAssertTrue(mbm->is_marked_strong(obj_addr)); + } + } + + static void verify_bitmap_all(ShenandoahMarkBitMap* mbm, HeapWord* all_marked_addresses[], + bool is_weakly_marked_object[], bool is_strongly_marked_object[], size_t all_marked_objects, + HeapWord* heap_memory, HeapWord* end_of_heap_memory) { + HeapWord* last_marked_addr = &heap_memory[-1]; + for (size_t i = 0; i < all_marked_objects; i++) { + HeapWord* obj_addr = all_marked_addresses[i]; + if (is_strongly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked(obj_addr)); + MarkBitMapAssertTrue(mbm->is_marked_strong(obj_addr)); + } + if (is_weakly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked(obj_addr)); + MarkBitMapAssertTrue(mbm->is_marked_weak(obj_addr)); + } + while (++last_marked_addr < obj_addr) { + MarkBitMapAssertTrue(!mbm->is_marked(last_marked_addr)); + MarkBitMapAssertTrue(!mbm->is_marked_strong(last_marked_addr)); + MarkBitMapAssertTrue(!mbm->is_marked_weak(last_marked_addr)); + } + last_marked_addr = obj_addr; + } + while (++last_marked_addr < end_of_heap_memory) { + MarkBitMapAssertTrue(!mbm->is_marked(last_marked_addr)); + MarkBitMapAssertTrue(!mbm->is_marked_strong(last_marked_addr)); + MarkBitMapAssertTrue(!mbm->is_marked_weak(last_marked_addr)); + } + + HeapWord* next_marked = (HeapWord*) &heap_memory[0] - 1; + for (size_t i = 0; i < all_marked_objects; i++) { + next_marked = mbm->get_next_marked_addr(next_marked + 1, end_of_heap_memory); + MarkBitMapAssertTrue(mbm->is_marked(next_marked)); + MarkBitMapAssertEqual(next_marked, all_marked_addresses[i]); + if (is_strongly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked_strong(next_marked)); + } + if (is_weakly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked_weak(next_marked)); + } + } + // We expect no more marked addresses to be found. Should return limit. 
+ HeapWord* sentinel = mbm->get_next_marked_addr(next_marked + 1, end_of_heap_memory); + MarkBitMapAssertEqual(sentinel, end_of_heap_memory); + + HeapWord* prev_marked = end_of_heap_memory + 1; + for (int i = (int) all_marked_objects - 1; i >= 0; i--) { + prev_marked = mbm->get_prev_marked_addr(&heap_memory[0], prev_marked - 1); + MarkBitMapAssertEqual(prev_marked, all_marked_addresses[i]); + MarkBitMapAssertTrue(mbm->is_marked(prev_marked)); + if (is_strongly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked_strong(prev_marked)); + } + if (is_weakly_marked_object[i]) { + MarkBitMapAssertTrue(mbm->is_marked_weak(prev_marked)); + } + } + // We expect no more marked addresses to be found. Should return prev_marked. + sentinel = mbm->get_prev_marked_addr(&heap_memory[0], prev_marked - 1); + MarkBitMapAssertEqual(sentinel, prev_marked); + } + +public: + + static bool run_test() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t heap_size = heap->max_capacity(); + size_t heap_size_words = heap_size / HeapWordSize; + HeapWord* my_heap_memory = heap->base(); + HeapWord* end_of_my_heap = my_heap_memory + heap_size_words; + MemRegion heap_descriptor(my_heap_memory, heap_size_words); + + _success = false; + _assertion_failures = 0; + + size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); + size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_size); + size_t bitmap_size = align_up(bitmap_size_orig, bitmap_page_size); + size_t bitmap_word_size = (bitmap_size + HeapWordSize - 1) / HeapWordSize; + + HeapWord* my_bitmap_memory = NEW_C_HEAP_ARRAY(HeapWord, bitmap_word_size, mtGC); + + MarkBitMapAssertTrue(my_bitmap_memory != nullptr); + if (my_bitmap_memory == nullptr) { + std::cout << "Cannot run test because allocation of bitmap memory failed\n" << std::flush; + return false; + } + MemRegion bitmap_descriptor(my_bitmap_memory, bitmap_size / HeapWordSize); + ShenandoahMarkBitMap mbm(heap_descriptor, bitmap_descriptor); + + mbm.clear_range_large(heap_descriptor); + verify_bitmap_is_empty((HeapWord*) my_heap_memory, heap_size_words, &mbm); + + HeapWord* weakly_marked_addresses[] = { + (HeapWord*) &my_heap_memory[13], + (HeapWord*) &my_heap_memory[14], + (HeapWord*) &my_heap_memory[15], + (HeapWord*) &my_heap_memory[16], + (HeapWord*) &my_heap_memory[176], + (HeapWord*) &my_heap_memory[240], + (HeapWord*) &my_heap_memory[480], + (HeapWord*) &my_heap_memory[1360], + (HeapWord*) &my_heap_memory[1488], + (HeapWord*) &my_heap_memory[2416], + (HeapWord*) &my_heap_memory[5968], + (HeapWord*) &my_heap_memory[8191], + (HeapWord*) &my_heap_memory[8192], + (HeapWord*) &my_heap_memory[8193] + }; + size_t weakly_marked_objects = sizeof(weakly_marked_addresses) / sizeof(HeapWord*); + for (size_t i = 0; i < weakly_marked_objects; i++) { + mbm.mark_weak(weakly_marked_addresses[i]); + } + HeapWord* next_marked = (HeapWord*) &my_heap_memory[0] - 1; + for (size_t i = 0; i < weakly_marked_objects; i++) { + next_marked = mbm.get_next_marked_addr(next_marked + 1, end_of_my_heap); + MarkBitMapAssertEqual(next_marked, weakly_marked_addresses[i]); + MarkBitMapAssertTrue(mbm.is_marked(next_marked)); + MarkBitMapAssertTrue(mbm.is_marked_weak(next_marked)); + MarkBitMapAssertTrue(!mbm.is_marked_strong(next_marked)); + } + // We expect no more marked addresses to be found. Should return limit.
+ HeapWord* sentinel = mbm.get_next_marked_addr(next_marked + 1, end_of_my_heap); + HeapWord* heap_limit = end_of_my_heap; + MarkBitMapAssertEqual(sentinel, heap_limit); + HeapWord* prev_marked = end_of_my_heap + 1; + for (int i = (int) weakly_marked_objects - 1; i >= 0; i--) { + prev_marked = mbm.get_prev_marked_addr(&my_heap_memory[0], prev_marked - 1); + MarkBitMapAssertEqual(prev_marked, weakly_marked_addresses[i]); + MarkBitMapAssertTrue(mbm.is_marked(prev_marked)); + MarkBitMapAssertTrue(mbm.is_marked_weak(prev_marked)); + MarkBitMapAssertTrue(!mbm.is_marked_strong(prev_marked)); + } + // We expect no more marked addresses to be found. Should return prev_marked. + sentinel = mbm.get_prev_marked_addr(&my_heap_memory[0], prev_marked - 1); + MarkBitMapAssertEqual(sentinel, prev_marked); + verify_bitmap_is_weakly_marked(&mbm, weakly_marked_addresses, weakly_marked_objects); + + HeapWord* strongly_marked_addresses[] = { + (HeapWord*) &my_heap_memory[8], + (HeapWord*) &my_heap_memory[24], + (HeapWord*) &my_heap_memory[32], + (HeapWord*) &my_heap_memory[56], + (HeapWord*) &my_heap_memory[64], + (HeapWord*) &my_heap_memory[168], + (HeapWord*) &my_heap_memory[232], + (HeapWord*) &my_heap_memory[248], + (HeapWord*) &my_heap_memory[256], + (HeapWord*) &my_heap_memory[257], + (HeapWord*) &my_heap_memory[258], + (HeapWord*) &my_heap_memory[259], + (HeapWord*) &my_heap_memory[488], + (HeapWord*) &my_heap_memory[1352], + (HeapWord*) &my_heap_memory[1496], + (HeapWord*) &my_heap_memory[2432], + (HeapWord*) &my_heap_memory[5960] + }; + size_t strongly_marked_objects = sizeof(strongly_marked_addresses) / sizeof(HeapWord*); + for (size_t i = 0; i < strongly_marked_objects; i++) { + bool upgraded = false; + mbm.mark_strong(strongly_marked_addresses[i], upgraded); + MarkBitMapAssertTrue(!upgraded); + } + verify_bitmap_is_strongly_marked(&mbm, strongly_marked_addresses, strongly_marked_objects); + HeapWord* upgraded_weakly_marked_addresses[] = { + (HeapWord*) &my_heap_memory[240], + (HeapWord*) &my_heap_memory[1360], + }; + size_t upgraded_weakly_marked_objects = sizeof(upgraded_weakly_marked_addresses) / sizeof(HeapWord *); + for (size_t i = 0; i < upgraded_weakly_marked_objects; i++) { + bool upgraded = false; + mbm.mark_strong(upgraded_weakly_marked_addresses[i], upgraded); + MarkBitMapAssertTrue(upgraded); + } + verify_bitmap_is_strongly_marked(&mbm, upgraded_weakly_marked_addresses, upgraded_weakly_marked_objects); + + HeapWord* all_marked_addresses[] = { + (HeapWord*) &my_heap_memory[8], /* strongly marked */ + (HeapWord*) &my_heap_memory[13], /* weakly marked */ + (HeapWord*) &my_heap_memory[14], /* weakly marked */ + (HeapWord*) &my_heap_memory[15], /* weakly marked */ + (HeapWord*) &my_heap_memory[16], /* weakly marked */ + (HeapWord*) &my_heap_memory[24], /* strongly marked */ + (HeapWord*) &my_heap_memory[32], /* strongly marked */ + (HeapWord*) &my_heap_memory[56], /* strongly marked */ + (HeapWord*) &my_heap_memory[64], /* strongly marked */ + (HeapWord*) &my_heap_memory[168], /* strongly marked */ + (HeapWord*) &my_heap_memory[176], /* weakly marked */ + (HeapWord*) &my_heap_memory[232], /* strongly marked */ + (HeapWord*) &my_heap_memory[240], /* weakly marked upgraded to strongly marked */ + (HeapWord*) &my_heap_memory[248], /* strongly marked */ + (HeapWord*) &my_heap_memory[256], /* strongly marked */ + (HeapWord*) &my_heap_memory[257], /* strongly marked */ + (HeapWord*) &my_heap_memory[258],
/* strongly marked */ + (HeapWord*) &my_heap_memory[259], /* strongly marked */ + (HeapWord*) &my_heap_memory[480], /* weakly marked */ + (HeapWord*) &my_heap_memory[488], /* strongly marked */ + (HeapWord*) &my_heap_memory[1352], /* strongly marked */ + (HeapWord*) &my_heap_memory[1360], /* weakly marked upgraded to strongly marked */ + (HeapWord*) &my_heap_memory[1488], /* weakly marked */ + (HeapWord*) &my_heap_memory[1496], /* strongly marked */ + (HeapWord*) &my_heap_memory[2416], /* weakly marked */ + (HeapWord*) &my_heap_memory[2432], /* strongly marked */ + (HeapWord*) &my_heap_memory[5960], /* strongly marked */ + (HeapWord*) &my_heap_memory[5968], /* weakly marked */ + (HeapWord*) &my_heap_memory[8191], /* weakly marked */ + (HeapWord*) &my_heap_memory[8192], /* weakly marked */ + (HeapWord*) &my_heap_memory[8193] /* weakly marked */ + }; + size_t all_marked_objects = sizeof(all_marked_addresses) / sizeof(HeapWord*); + bool is_weakly_marked_object[] = { + false, + true, + true, + true, + true, + false, + false, + false, + false, + false, + true, + false, + true, + false, + false, + false, + false, + false, + true, + false, + false, + true, + true, + false, + true, + false, + false, + true, + true, + true, + true + }; + bool is_strongly_marked_object[] = { + true, + false, + false, + false, + false, + true, + true, + true, + true, + true, + false, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + false, + true, + false, + true, + true, + false, + false, + false, + false + }; + verify_bitmap_all(&mbm, all_marked_addresses, is_weakly_marked_object, is_strongly_marked_object, all_marked_objects, + my_heap_memory, end_of_my_heap); + + MemRegion first_clear_region(&my_heap_memory[168], &my_heap_memory[256]); + mbm.clear_range_large(first_clear_region); + // Five objects are no longer marked + HeapWord* all_marked_addresses_after_first_clear[] = { + (HeapWord*) &my_heap_memory[8], /* strongly marked */ + (HeapWord*) &my_heap_memory[13], /* weakly marked */ + (HeapWord*) &my_heap_memory[14], /* weakly marked */ + (HeapWord*) &my_heap_memory[15], /* weakly marked */ + (HeapWord*) &my_heap_memory[16], /* weakly marked */ + (HeapWord*) &my_heap_memory[24], /* strongly marked */ + (HeapWord*) &my_heap_memory[32], /* strongly marked */ + (HeapWord*) &my_heap_memory[56], /* strongly marked */ + (HeapWord*) &my_heap_memory[64], /* strongly marked */ + (HeapWord*) &my_heap_memory[256], /* strongly marked */ + (HeapWord*) &my_heap_memory[257], /* strongly marked */ + (HeapWord*) &my_heap_memory[258], /* strongly marked */ + (HeapWord*) &my_heap_memory[259], /* strongly marked */ + (HeapWord*) &my_heap_memory[480], /* weakly marked */ + (HeapWord*) &my_heap_memory[488], /* strongly marked */ + (HeapWord*) &my_heap_memory[1352], /* strongly marked */ + (HeapWord*) &my_heap_memory[1360], /* weakly marked upgraded to strongly marked */ + (HeapWord*) &my_heap_memory[1488], /* weakly marked */ + (HeapWord*) &my_heap_memory[1496], /* strongly marked */ + (HeapWord*) &my_heap_memory[2416], /* weakly marked */ + (HeapWord*) &my_heap_memory[2432], /* strongly marked */ + (HeapWord*) &my_heap_memory[5960], /* strongly marked */ + (HeapWord*) &my_heap_memory[5968], /* weakly marked */ + (HeapWord*) &my_heap_memory[8191], /* weakly marked */ + (HeapWord*) &my_heap_memory[8192], /* weakly marked */ + (HeapWord*) &my_heap_memory[8193] /* weakly marked */ + }; + size_t all_marked_objects_after_first_clear = sizeof(all_marked_addresses_after_first_clear) / 
sizeof(HeapWord*); + bool is_weakly_marked_object_after_first_clear[] = { + false, + true, + true, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + true, + true, + false, + true, + false, + false, + true, + true, + true, + true + }; + bool is_strongly_marked_object_after_first_clear[] = { + true, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + false, + true, + false, + true, + true, + false, + false, + false, + false + }; + verify_bitmap_all(&mbm, all_marked_addresses_after_first_clear, + is_weakly_marked_object_after_first_clear, is_strongly_marked_object_after_first_clear, + all_marked_objects_after_first_clear, my_heap_memory, end_of_my_heap); + + MemRegion second_clear_region(&my_heap_memory[1360], &my_heap_memory[2416]); + mbm.clear_range_large(second_clear_region); + // Three objects are no longer marked + HeapWord* all_marked_addresses_after_2nd_clear[] = { + (HeapWord*) &my_heap_memory[8], /* strongly marked */ + (HeapWord*) &my_heap_memory[13], /* weakly marked */ + (HeapWord*) &my_heap_memory[14], /* weakly marked */ + (HeapWord*) &my_heap_memory[15], /* weakly marked */ + (HeapWord*) &my_heap_memory[16], /* weakly marked */ + (HeapWord*) &my_heap_memory[24], /* strongly marked */ + (HeapWord*) &my_heap_memory[32], /* strongly marked */ + (HeapWord*) &my_heap_memory[56], /* strongly marked */ + (HeapWord*) &my_heap_memory[64], /* strongly marked */ + (HeapWord*) &my_heap_memory[256], /* strongly marked */ + (HeapWord*) &my_heap_memory[257], /* strongly marked */ + (HeapWord*) &my_heap_memory[258], /* strongly marked */ + (HeapWord*) &my_heap_memory[259], /* strongly marked */ + (HeapWord*) &my_heap_memory[480], /* weakly marked */ + (HeapWord*) &my_heap_memory[488], /* strongly marked */ + (HeapWord*) &my_heap_memory[1352], /* strongly marked */ + (HeapWord*) &my_heap_memory[2416], /* weakly marked */ + (HeapWord*) &my_heap_memory[2432], /* strongly marked */ + (HeapWord*) &my_heap_memory[5960], /* strongly marked */ + (HeapWord*) &my_heap_memory[5968], /* weakly marked */ + (HeapWord*) &my_heap_memory[8191], /* weakly marked */ + (HeapWord*) &my_heap_memory[8192], /* weakly marked */ + (HeapWord*) &my_heap_memory[8193] /* weakly marked */ + }; + size_t all_marked_objects_after_2nd_clear = sizeof(all_marked_addresses_after_2nd_clear) / sizeof(HeapWord*); + bool is_weakly_marked_object_after_2nd_clear[] = { + false, + true, + true, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + true, + false, + false, + true, + true, + true, + true + }; + bool is_strongly_marked_object_after_2nd_clear[] = { + true, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + false, + true, + true, + false, + false, + false, + false + }; + verify_bitmap_all(&mbm, all_marked_addresses_after_2nd_clear, + is_weakly_marked_object_after_2nd_clear, is_strongly_marked_object_after_2nd_clear, + all_marked_objects_after_2nd_clear, my_heap_memory, end_of_my_heap); + + FREE_C_HEAP_ARRAY(HeapWord, my_bitmap_memory); + _success = true; + return true; + } +}; + +TEST_VM_F(ShenandoahMarkBitMapTest, minimum_test) { + SKIP_IF_NOT_SHENANDOAH(); + + bool result = ShenandoahMarkBitMapTest::run_test(); + ASSERT_EQ(result, true); + ASSERT_EQ(_success, true); + ASSERT_EQ(_assertion_failures, (size_t) 0); +} diff --git
a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp index b661bd6bc5717..b184b19ce6c12 100644 --- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp @@ -49,7 +49,7 @@ #else #define SKIP_IF_NOT_SHENANDOAH() \ if (!UseShenandoahGC) { \ - tty->print_cr("skipped"); \ + std::cout << "skipped\n"; \ return; \ } #endif diff --git a/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java b/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java index 2e98c72ee176b..0775e5baadd2c 100644 --- a/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java +++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java @@ -357,6 +357,7 @@ * -XX:TieredStopAtLevel=4 * TestClone */ + public class TestClone { public static void main(String[] args) throws Exception {
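To make the reverse search in get_prev_bit_impl easier to follow, here is a standalone sketch of the same idea (illustrative only, not HotSpot code; prev_marked_slot and its layout constants are hypothetical). It models the two-bits-per-object layout directly: each object slot owns two adjacent mark bits (strong and weak), so a 64-bit word covers 32 slots. The sketch masks off the bits above the right bound in the first word, walks whole words downward, and converts the highest set bit back to a slot, range-checking against the left bound just as the patch's "result >= l_index" check does.

#include <bit>
#include <cstddef>
#include <cstdint>

// Returns the highest slot in [l_slot, r_slot] with either mark bit set, or
// SIZE_MAX if none. (The patch instead returns the bit-level sentinel
// r_index + 2, which get_prev_marked_addr maps back to addr + 1.)
static size_t prev_marked_slot(const uint64_t* map, size_t l_slot, size_t r_slot) {
  const size_t lo_word = l_slot / 32;
  size_t w = r_slot / 32;
  // Discard bits belonging to slots above r_slot in the first word; this
  // plays the role of the patch's left shift by (BitsPerWord - 2 - bit_in_word).
  const unsigned keep = 2u * static_cast<unsigned>(r_slot % 32) + 2u;
  uint64_t cword = map[w] & ((keep == 64) ? ~UINT64_C(0) : ((UINT64_C(1) << keep) - 1));
  while (true) {
    if (cword != 0) {
      // Highest set bit identifies the rightmost marked slot: every two
      // leading zeros step back one slot, mirroring the count_leading_zeros math.
      const int top_bit = 63 - std::countl_zero(cword);
      const size_t slot = w * 32 + static_cast<size_t>(top_bit) / 2;
      // The word containing l_slot is searched in full, so reject hits that
      // fall below the range.
      return (slot >= l_slot) ? slot : SIZE_MAX;
    }
    if (w == lo_word) {
      return SIZE_MAX; // range exhausted with no marked slot
    }
    cword = map[--w]; // walk whole words downward
  }
}

The flip/XOR machinery, the strong-versus-weak distinction, and the aligned_left specialization are deliberately omitted; the masked first word, the word-at-a-time downward walk, and the two sentinel-producing exits are the parts of the algorithm that the patch's comments describe.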