Runtime compressed refs work
Remove explicit and implicit (pointer math) uses of
sizeof(fomrobject_t).

The example and fvtest code are not updated in this change.

Signed-off-by: Graham Chapman <graham_chapman@ca.ibm.com>
gacholio committed Jan 13, 2020
1 parent 947946e commit 688c417
Showing 6 changed files with 103 additions and 65 deletions.
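
The recurring pattern in the diffs below is to stop letting the compiler scale pointer arithmetic by sizeof(fomrobject_t) and instead scale by the reference width selected at run time. A minimal standalone sketch of the idea, using illustrative names rather than the OMR API:

#include <cstdint>

/* Illustrative stand-in: in a build that supports both modes, the slot type no
 * longer reflects the real slot width, which is 4 bytes when references are
 * compressed and pointer-sized when they are not. */
typedef uintptr_t fomrobject_t;

/* Advance a slot pointer by 'index' slots using the runtime slot width instead
 * of the compile-time sizeof(fomrobject_t). */
static inline fomrobject_t *addToSlotAddress(fomrobject_t *base, uintptr_t index, bool compressed)
{
	uintptr_t slotSize = compressed ? sizeof(uint32_t) : sizeof(uintptr_t);
	return (fomrobject_t *)((uintptr_t)base + (index * slotSize));
}

/* Before: scanPtr += 1;                                       (implicit sizeof(fomrobject_t))
 * After:  scanPtr = addToSlotAddress(scanPtr, 1, compressed); (explicit runtime width) */
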
4 changes: 2 additions & 2 deletions gc/base/IndexableObjectScanner.hpp
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2015, 2019 IBM Corp. and others
* Copyright (c) 2015, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -51,7 +51,7 @@ class GC_IndexableObjectScanner : public GC_ObjectScanner
* @param[in] scanPtr pointer to the array cell where scanning will start
* @param[in] endPtr pointer to the array cell where scanning will stop
* @param[in] scanMap first portion of bitmap for slots to scan
* @param[in] elementSize array element size must be aligned to sizeof(fomrobject_t)
* @param[in] elementSize array element size must be aligned to the size of an object to object reference
* @param[in] flags scanning context flags
*/
GC_IndexableObjectScanner(
5 changes: 1 addition & 4 deletions gc/base/ObjectModelBase.hpp
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2019 IBM Corp. and others
* Copyright (c) 1991, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -901,9 +901,6 @@ class GC_ObjectModelBase : public MM_BaseVirtual
: _delegate((fomrobject_t)OMR_OBJECT_METADATA_FLAGS_MASK)
{
_typeId = __FUNCTION__;
#if defined(OBJECT_MODEL_MODRON_ASSERTIONS)
Assert_MM_true((8 * (sizeof(fomrobject_t) - 1)) >= _delegate.getObjectHeaderSlotFlagsShift());
#endif /* defined(OBJECT_MODEL_MODRON_ASSERTIONS) */
}
};
#if defined(OMR_EXAMPLE)
59 changes: 27 additions & 32 deletions gc/base/ObjectScanner.hpp
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2015, 2019 IBM Corp. and others
* Copyright (c) 2015, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -62,6 +62,9 @@ class GC_ObjectScanner : public MM_BaseVirtual
fomrobject_t *_scanPtr; /**< Pointer to base of object slots mapped by current _scanMap */
GC_SlotObject _slotObject; /**< Create own SlotObject class to provide output */
uintptr_t _flags; /**< Scavenger context flags (scanRoots, scanHeap, ...) */
#if defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS)
bool const _compressObjectReferences;
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) */

public:
/**
@@ -101,6 +104,9 @@ class GC_ObjectScanner : public MM_BaseVirtual
, _scanPtr(scanPtr)
, _slotObject(env->getOmrVM(), NULL)
, _flags(flags | headObjectScanner)
#if defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS)
, _compressObjectReferences(env->compressObjectReferences())
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) */
{
_typeId = __FUNCTION__;
}
@@ -123,42 +129,29 @@
*
* @param[in] env Current environment
* @see getNextSlotMap()
* @see putNextSlotMapBit()
*/
MMINLINE void
initialize(MM_EnvironmentBase *env)
{
}

public:
/**
* Helper function can be used to rebuild bit map of reference fields in
* implementation of getNextSlotMap(). Simply call this method once for
* each object slot holding a reference pointer. Best to present reference
* fields in increasing address order until method returns false or no
* more fields.
*
* If the method returns false, the field presented in the call will not
* be included in the slot map and must be presented first in the next
* call to getNextSlotMap().
* Return back true if object references are compressed
* @return true, if object references are compressed
*/
MMINLINE bool
putNextSlotMapBit(fomrobject_t *nextSlotAddress)
{
if (0 != _scanMap) {
intptr_t bitOffset = nextSlotAddress - _scanPtr;
if (_bitsPerScanMap < bitOffset) {
_scanMap |= (uintptr_t)1 << bitOffset;
} else {
return false;
}
} else {
_scanPtr = nextSlotAddress;
_scanMap = 1;
}
MMINLINE bool compressObjectReferences() {
#if defined(OMR_GC_COMPRESSED_POINTERS)
#if defined(OMR_GC_FULL_POINTERS)
return _compressObjectReferences;
#else /* defined(OMR_GC_FULL_POINTERS) */
return true;
#endif /* defined(OMR_GC_FULL_POINTERS) */
#else /* defined(OMR_GC_COMPRESSED_POINTERS) */
return false;
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
}

public:
/**
* Leaf objects contain no reference slots (eg plain value object or empty array).
*
@@ -186,16 +179,17 @@ class GC_ObjectScanner : public MM_BaseVirtual
MMINLINE GC_SlotObject *
getNextSlot()
{
bool const compressed = compressObjectReferences();
while (NULL != _scanPtr) {
/* while there is at least one bit-mapped slot, advance scan ptr to a non-NULL slot or end of map */
while ((0 != _scanMap) && ((0 == (1 & _scanMap)) || (0 == *_scanPtr))) {
_scanPtr += 1;
while ((0 != _scanMap) && ((0 == (1 & _scanMap)) || (0 == (compressed ? (uintptr_t)*(uint32_t*)_scanPtr : *(uintptr_t*)_scanPtr)))) {
_scanPtr = GC_SlotObject::addToSlotAddress(_scanPtr, 1, compressed);
_scanMap >>= 1;
}
if (0 != _scanMap) {
/* set up to return slot object for non-NULL slot at scan ptr and advance scan ptr */
_slotObject.writeAddressToSlot(_scanPtr);
_scanPtr += 1;
_scanPtr = GC_SlotObject::addToSlotAddress(_scanPtr, 1, compressed);
_scanMap >>= 1;
return &_slotObject;
}
@@ -257,18 +251,19 @@ class GC_ObjectScanner : public MM_BaseVirtual
MMINLINE GC_SlotObject *
getNextSlot(bool* isLeafSlot)
{
bool const compressed = compressObjectReferences();
while (NULL != _scanPtr) {
/* while there is at least one bit-mapped slot, advance scan ptr to a non-NULL slot or end of map */
while ((0 != _scanMap) && ((0 == (1 & _scanMap)) || (0 == *_scanPtr))) {
_scanPtr += 1;
while ((0 != _scanMap) && ((0 == (1 & _scanMap)) || (0 == (compressed ? (uintptr_t)*(uint32_t*)_scanPtr : *(uintptr_t*)_scanPtr)))) {
_scanPtr = GC_SlotObject::addToSlotAddress(_scanPtr, 1, compressed);
_scanMap >>= 1;
_leafMap >>= 1;
}
if (0 != _scanMap) {
/* set up to return slot object for non-NULL slot at scan ptr and advance scan ptr */
_slotObject.writeAddressToSlot(_scanPtr);
*isLeafSlot = (0 != (1 & _leafMap));
_scanPtr += 1;
_scanPtr = GC_SlotObject::addToSlotAddress(_scanPtr, 1, compressed);
_scanMap >>= 1;
_leafMap >>= 1;
return &_slotObject;
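
The _compressObjectReferences member and its query above follow the mixed-build pattern used throughout this work: the cached flag only exists when both reference modes are compiled in, and the query folds to a compile-time constant otherwise. A simplified, self-contained sketch of that pattern (the macro and class names here are illustrative, not the OMR flags):

#include <cstdint>

#define SKETCH_COMPRESSED_POINTERS 1	/* illustrative build flags, both enabled here */
#define SKETCH_FULL_POINTERS 1

class ScannerSketch
{
#if defined(SKETCH_COMPRESSED_POINTERS) && defined(SKETCH_FULL_POINTERS)
	bool const _compressObjectReferences;	/* cached once per scanner in mixed builds */
#endif
public:
	explicit ScannerSketch(bool envCompressed)
#if defined(SKETCH_COMPRESSED_POINTERS) && defined(SKETCH_FULL_POINTERS)
		: _compressObjectReferences(envCompressed)
#endif
	{
		(void)envCompressed;
	}

	bool compressObjectReferences() const
	{
#if defined(SKETCH_COMPRESSED_POINTERS)
#if defined(SKETCH_FULL_POINTERS)
		return _compressObjectReferences;	/* mixed build: decided at run time */
#else
		return true;				/* compressed-only build: constant */
#endif
#else
		return false;				/* full-only build: constant */
#endif
	}
};
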
88 changes: 66 additions & 22 deletions gc/base/SlotObject.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2019 IBM Corp. and others
* Copyright (c) 1991, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -47,18 +47,6 @@ class GC_SlotObject
public:

private:
/* Inlined version of converting a compressed token to an actual pointer */
MMINLINE omrobjectptr_t
convertPointerFromToken(fomrobject_t token)
{
uintptr_t value = (uintptr_t)token;
#if defined (OMR_GC_COMPRESSED_POINTERS)
if (compressObjectReferences()) {
value <<= _compressedPointersShift;
}
#endif /* OMR_GC_COMPRESSED_POINTERS */
return (omrobjectptr_t)value;
}
/* Inlined version of converting a pointer to a compressed token */
MMINLINE fomrobject_t
convertTokenFromPointer(omrobjectptr_t pointer)
@@ -73,6 +61,44 @@
}

public:
/**
* log2(size of an object to object reference)
*
* @param[in] compressed true if object to object references are compressed, false if not
* @return the shift value
*/
MMINLINE static uintptr_t logReferenceSize(bool compressed) { return compressed ? 2 : OMR_LOG_POINTER_SIZE; }

/**
* Calculate the difference between two object slot addresses, in slots
*
* @param[in] p1 the value to be subtracted from
* @param[in] p2 the value to be subtracted
* @param[in] compressed true if object to object references are compressed, false if not
* @return p1 - p2 in slots
*/
MMINLINE static uintptr_t subtractSlotAddresses(fomrobject_t *p1, fomrobject_t *p2, bool compressed) { return ((uintptr_t)p1 - (uintptr_t)p2) >> logReferenceSize(compressed); }

/**
* Calculate the addition of an integer to an object slot address
*
* @param[in] base the base slot pointer
* @param[in] index the index to add
* @param[in] compressed true if object to object references are compressed, false if not
* @return the adjusted address
*/
MMINLINE static fomrobject_t *addToSlotAddress(fomrobject_t * base, uintptr_t index, bool compressed) { return (fomrobject_t*)((uintptr_t)base + (index << logReferenceSize(compressed))); }

/**
* Calculate the subtraction of an integer from an object slot address
*
* @param[in] base the base slot pointer
* @param[in] index the index to subtract
* @param[in] compressed true if object to object references are compressed, false if not
* @return the adjusted address
*/
MMINLINE static fomrobject_t *subtractFromSlotAddress(fomrobject_t * base, uintptr_t index, bool compressed) { return (fomrobject_t*)((uintptr_t)base - (index << logReferenceSize(compressed))); }

/**
* Return back true if object references are compressed
* @return true, if object references are compressed
@@ -95,7 +121,16 @@
*/
MMINLINE omrobjectptr_t readReferenceFromSlot()
{
return convertPointerFromToken(*_slot);
omrobjectptr_t value = NULL;
#if defined (OMR_GC_COMPRESSED_POINTERS)
if (compressObjectReferences()) {
value = (omrobjectptr_t)(((uintptr_t)*(uint32_t volatile *)_slot) << _compressedPointersShift);
} else
#endif /* OMR_GC_COMPRESSED_POINTERS */
{
value = (omrobjectptr_t)*(uintptr_t volatile *)_slot;
}
return value;
}

/**
@@ -114,9 +149,13 @@
*/
MMINLINE void writeReferenceToSlot(omrobjectptr_t reference)
{
fomrobject_t compressed = convertTokenFromPointer(reference);
if (compressed != *_slot) {
*_slot = compressed;
#if defined (OMR_GC_COMPRESSED_POINTERS)
if (compressObjectReferences()) {
*(uint32_t volatile *)_slot = (uint32_t)((uintptr_t)reference >> _compressedPointersShift);
} else
#endif /* OMR_GC_COMPRESSED_POINTERS */
{
*(uintptr_t volatile *)_slot = (uintptr_t)reference;
}
}

@@ -128,14 +167,19 @@
MMINLINE bool atomicWriteReferenceToSlot(omrobjectptr_t oldReference, omrobjectptr_t newReference)
{
/* Caller should ensure oldReference != newReference */
fomrobject_t compressedOld = convertTokenFromPointer(oldReference);
fomrobject_t compressedNew = convertTokenFromPointer(newReference);
uintptr_t oldValue = (uintptr_t)oldReference;
uintptr_t newValue = (uintptr_t)newReference;
bool swapResult = false;

#if defined (OMR_GC_COMPRESSED_POINTERS)
if (compressObjectReferences()) {
swapResult = ((uint32_t)(uintptr_t)compressedOld == MM_AtomicOperations::lockCompareExchangeU32((uint32_t *)_slot, (uint32_t)(uintptr_t)compressedOld, (uint32_t)(uintptr_t)compressedNew));
} else {
swapResult = ((uintptr_t)compressedOld == MM_AtomicOperations::lockCompareExchange((uintptr_t *)_slot, (uintptr_t)compressedOld, (uintptr_t)compressedNew));
uint32_t oldCompressed = (uint32_t)(oldValue >> _compressedPointersShift);
uint32_t newCompressed = (uint32_t)(newValue >> _compressedPointersShift);
swapResult = (oldCompressed == MM_AtomicOperations::lockCompareExchangeU32((uint32_t volatile *)_slot, oldCompressed, newCompressed));
} else
#endif /* OMR_GC_COMPRESSED_POINTERS */
{
swapResult = (oldValue == MM_AtomicOperations::lockCompareExchange((uintptr_t volatile *)_slot, oldValue, newValue));
}

return swapResult;
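
The rewritten read and write accessors above inline the compress/decompress step: a compressed slot stores the pointer right-shifted into 32 bits, and reading it shifts the token back up. A minimal sketch of that round trip with an illustrative shift value (not the OMR accessors themselves):

#include <cassert>
#include <cstdint>

static const uintptr_t compressedPointersShift = 3;	/* illustrative shift value */

/* Store a pointer into a 4-byte slot by shifting it down into 32 bits. */
static inline void writeCompressedSlot(uint32_t volatile *slot, void *reference)
{
	*slot = (uint32_t)((uintptr_t)reference >> compressedPointersShift);
}

/* Read a 4-byte slot and shift the token back up into a full pointer. */
static inline void *readCompressedSlot(uint32_t volatile *slot)
{
	return (void *)(((uintptr_t)*slot) << compressedPointersShift);
}

int main()
{
	uint32_t volatile slot = 0;
	void *object = (void *)(uintptr_t)0x12345678;	/* must be aligned to 1 << shift */
	writeCompressedSlot(&slot, object);
	assert(object == readCompressedSlot(&slot));
	return 0;
}
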
7 changes: 4 additions & 3 deletions gc/base/segregated/MemoryPoolSegregated.cpp
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2015 IBM Corp. and others
* Copyright (c) 1991, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -139,6 +139,7 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
fomrobject_t *arrayoidPtr = _extensions->indexableObjectModel.getArrayoidPointer(spine);
Assert_MM_true(totalBytes >= spineBytes);
uintptr_t bytesRemaining = totalBytes - spineBytes;
bool const compressed = compressObjectReferences();
for (uintptr_t i=0; i<numberArraylets; i++) {
uintptr_t* arraylet = NULL;
if (0 < bytesRemaining) {
@@ -148,7 +149,7 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
env->getAllocationContext()->flush(env);

for (uintptr_t j=0; j<i; j++) {
GC_SlotObject slotObject(env->getOmrVM(), &arrayoidPtr[j]);
GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, j, compressed));
arraylet = (uintptr_t*)slotObject.readReferenceFromSlot();

MM_HeapRegionDescriptorSegregated *region = (MM_HeapRegionDescriptorSegregated *)regionManager->tableDescriptorForAddress(arraylet);
@@ -180,7 +181,7 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
*/
Assert_MM_true(i == numberArraylets - 1);
}
GC_SlotObject slotObject(env->getOmrVM(), &arrayoidPtr[i]);
GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, i, compressed));
slotObject.writeReferenceToSlot((omrobjectptr_t)arraylet);
bytesRemaining = MM_Math::saturatingSubtract(bytesRemaining, arrayletLeafSize);
}
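
The arrayoid change above is the same substitution applied to indexing: &arrayoidPtr[j] scales by sizeof(fomrobject_t) at compile time, whereas GC_SlotObject::addToSlotAddress scales by the slot width chosen at run time. A self-contained check of the intended arithmetic on a 64-bit target (the declarations here are illustrative, not OMR code):

#include <cassert>
#include <cstdint>

/* log2 of the slot width: 4-byte slots when references are compressed,
 * pointer-width slots otherwise. */
static inline uintptr_t logReferenceSize(bool compressed)
{
	return compressed ? 2 : ((8 == sizeof(uintptr_t)) ? 3 : 2);
}

static inline void *addToSlotAddress(void *base, uintptr_t index, bool compressed)
{
	return (void *)((uintptr_t)base + (index << logReferenceSize(compressed)));
}

int main()
{
	unsigned char arrayoid[64] = {0};
	/* Slot 3 starts 12 bytes in with compressed slots, 24 bytes in with full 64-bit slots. */
	assert(12 == ((uintptr_t)addToSlotAddress(arrayoid, 3, true) - (uintptr_t)arrayoid));
	assert((3 * sizeof(uintptr_t)) == ((uintptr_t)addToSlotAddress(arrayoid, 3, false) - (uintptr_t)arrayoid));
	return 0;
}
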
5 changes: 3 additions & 2 deletions gc/base/standard/ConcurrentGC.cpp
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2018 IBM Corp. and others
* Copyright (c) 1991, 2020 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
@@ -2770,6 +2770,7 @@ MM_ConcurrentGC::localMark(MM_EnvironmentBase *env, uintptr_t sizeToTrace)
{
omrobjectptr_t objectPtr;
uintptr_t gcCount = _extensions->globalGCStats.gcCount;
uint32_t const referenceSize = env->compressObjectReferences() ? sizeof(uint32_t) : sizeof(uintptr_t);

env->_workStack.reset(env, _markingScheme->getWorkPackets());
Assert_MM_true(env->_cycleState == NULL);
@@ -2785,7 +2786,7 @@ MM_ConcurrentGC::localMark(MM_EnvironmentBase *env, uintptr_t sizeToTrace)
} else if (((MM_ConcurrentCardTable *)_cardTable)->isObjectInActiveTLH(env,objectPtr)) {
env->_workStack.pushDefer(env,objectPtr);
/* We are deferring the tracing but get some "tracing credit" */
sizeTraced += sizeof(fomrobject_t);
sizeTraced += referenceSize;
} else if (((MM_ConcurrentCardTable *)_cardTable)->isObjectInUncleanedDirtyCard(env,objectPtr)) {
/* Dont need to trace this object now as we will re-visit it
* later when we clean the card during concurrent card cleaning
