Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement ARM32 atomic intrinsics #97792

Open
wants to merge 15 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ public static partial class Interlocked
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Exchange(ref int location1, int value)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return Exchange(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
Expand Down Expand Up @@ -130,7 +130,7 @@ public static long Exchange(ref long location1, long value)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int CompareExchange(ref int location1, int value, int comparand)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return CompareExchange(ref location1, value, comparand); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
Expand Down Expand Up @@ -229,7 +229,7 @@ public static long CompareExchange(ref long location1, long value, long comparan
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ExchangeAdd(ref int location1, int value)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return ExchangeAdd(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
Expand Down
253 changes: 253 additions & 0 deletions src/coreclr/jit/codegenarm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,11 @@ bool CodeGen::genInstrWithConstant(
{
case INS_add:
case INS_sub:
if (imm < 0)
{
imm = -imm;
ins = (ins == INS_add) ? INS_sub : INS_add;
}
immFitsInIns = validImmForInstr(ins, (target_ssize_t)imm, flags);
break;

Expand Down Expand Up @@ -653,6 +658,254 @@ void CodeGen::genJumpTable(GenTree* treeNode)
genProduceReg(treeNode);
}

//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
//    treeNode - the GT_XADD/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
    GenTree*  data      = treeNode->AsOp()->gtOp2;
    GenTree*  addr      = treeNode->AsOp()->gtOp1;
    regNumber targetReg = treeNode->GetRegNum();
    regNumber dataReg   = data->GetRegNum();
    regNumber addrReg   = addr->GetRegNum();

    genConsumeAddress(addr);
    genConsumeRegs(data);

    // XORR/XAND are not handled here; small-typed operands are only expected for XCHG.
    assert(!treeNode->OperIs(GT_XORR, GT_XAND));
    assert(treeNode->OperIs(GT_XCHG) || !varTypeIsSmall(treeNode->TypeGet()));

    emitAttr dataSize = emitActualTypeSize(data);

    // tempReg receives the strex status result (0 == store succeeded).
    regNumber tempReg = treeNode->ExtractTempReg(RBM_ALLINT);
    // For XCHG the stored value is the incoming data itself; XADD needs a scratch
    // register to hold (loaded value + data).
    regNumber storeReg = (treeNode->OperGet() == GT_XCHG) ? dataReg : treeNode->ExtractTempReg(RBM_ALLINT);
    // If the node's result is unused (targetReg == REG_NA), load into the store scratch.
    regNumber loadReg = (targetReg != REG_NA) ? targetReg : storeReg;

    // Check allocator assumptions
    //
    // The register allocator should have extended the lifetimes of all input and internal registers so that
    // none interfere with the target.
    noway_assert(addrReg != targetReg);

    noway_assert(addrReg != loadReg);
    noway_assert(dataReg != loadReg);

    noway_assert((treeNode->OperGet() == GT_XCHG) || (addrReg != dataReg));

    assert(addr->isUsedFromReg());
    noway_assert(tempReg != REG_NA);
    noway_assert(tempReg != targetReg);
    noway_assert((targetReg != REG_NA) || (treeNode->OperGet() != GT_XCHG));

    // Store exclusive unpredictable cases must be avoided
    noway_assert(tempReg != addrReg);

    // NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
    // registers die at the first instruction generated by the node. This is not the case for these atomics as
    // the input registers are multiply-used. As such, we need to mark the addr register as containing a GC
    // pointer until we are finished generating the code for this node.

    gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

    // Emit code like this:
    //   retry:
    //     ldrex loadReg, [addrReg]
    //     add   storeReg, loadReg, dataReg  # Only for GT_XADD
    //                                       # GT_XCHG storeReg === dataReg
    //     strex tempReg, storeReg, [addrReg]
    //     cmp   tempReg, 0
    //     bne   retry
    //     dmb ish

    instruction insLd = INS_ldrex;
    instruction insSt = INS_strex;
    if (varTypeIsByte(treeNode->TypeGet()))
    {
        insLd = INS_ldrexb;
        insSt = INS_strexb;
    }
    else if (varTypeIsShort(treeNode->TypeGet()))
    {
        insLd = INS_ldrexh;
        insSt = INS_strexh;
    }

    instGen_MemoryBarrier();

    BasicBlock* labelRetry = genCreateTempLabel();
    genDefineTempLabel(labelRetry);

    // The following instruction includes an acquire half barrier
    GetEmitter()->emitIns_R_R(insLd, dataSize, loadReg, addrReg);

    if (treeNode->OperGet() == GT_XADD)
    {
        if (data->isContainedIntOrIImmed())
        {
            // tempReg is safe to use as the constant-materialization scratch here:
            // it is not consumed until the strex below.
            genInstrWithConstant(INS_add, dataSize, storeReg, loadReg, data->AsIntConCommon()->IconValue(),
                                 INS_FLAGS_DONT_CARE, tempReg);
        }
        else
        {
            GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeReg, loadReg, dataReg);
        }
    }

    // The following instruction includes a release half barrier
    GetEmitter()->emitIns_R_R_R(insSt, dataSize, tempReg, storeReg, addrReg);

    GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, tempReg, 0);
    GetEmitter()->emitIns_J(INS_bne, labelRetry);

    instGen_MemoryBarrier();

    // The address register may now be considered dead for GC purposes.
    gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());

    if (targetReg != REG_NA)
    {
        // ldrexb/ldrexh zero-extend the loaded value; sign-extend the result
        // for small signed types so callers observe a properly-extended int.
        if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
        {
            instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
            GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
        }

        genProduceReg(treeNode);
    }
}

//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
//    treeNode - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
{
    assert(treeNode->OperIs(GT_CMPXCHG));

    GenTree* addr      = treeNode->Addr();      // arg1
    GenTree* data      = treeNode->Data();      // arg2
    GenTree* comparand = treeNode->Comparand(); // arg3

    regNumber targetReg    = treeNode->GetRegNum();
    regNumber dataReg      = data->GetRegNum();
    regNumber addrReg      = addr->GetRegNum();
    regNumber comparandReg = comparand->GetRegNum();

    genConsumeAddress(addr);
    genConsumeRegs(data);
    genConsumeRegs(comparand);

    emitAttr dataSize = emitActualTypeSize(data);

    // exResultReg receives the strex status result (0 == store succeeded).
    regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);

    // Check allocator assumptions
    //
    // The register allocator should have extended the lifetimes of all input and internal registers so that
    // none interfere with the target.
    noway_assert(addrReg != targetReg);
    noway_assert(dataReg != targetReg);
    noway_assert(comparandReg != targetReg);
    noway_assert(addrReg != dataReg);
    noway_assert(targetReg != REG_NA);
    noway_assert(exResultReg != REG_NA);
    noway_assert(exResultReg != targetReg);

    assert(addr->isUsedFromReg());
    assert(data->isUsedFromReg());
    assert(!comparand->isUsedFromMemory());

    // Store exclusive unpredictable cases must be avoided
    noway_assert(exResultReg != dataReg);
    noway_assert(exResultReg != addrReg);

    // NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
    // registers
    // die at the first instruction generated by the node. This is not the case for these atomics as the input
    // registers are multiply-used. As such, we need to mark the addr register as containing a GC pointer until
    // we are finished generating the code for this node.

    gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

    // Emit code like this:
    //   retry:
    //     ldrex targetReg, [addrReg]
    //     cmp targetReg, comparandReg
    //     bne compareFail
    //     strex exResult, dataReg, [addrReg]
    //     cmp exResult, 0
    //     bne retry
    //   compareFail:
    //     dmb ish

    instruction insLd = INS_ldrex;
    instruction insSt = INS_strex;
    if (varTypeIsByte(treeNode->TypeGet()))
    {
        insLd = INS_ldrexb;
        insSt = INS_strexb;
    }
    else if (varTypeIsShort(treeNode->TypeGet()))
    {
        insLd = INS_ldrexh;
        insSt = INS_strexh;
    }

    instGen_MemoryBarrier();

    BasicBlock* labelRetry       = genCreateTempLabel();
    BasicBlock* labelCompareFail = genCreateTempLabel();
    genDefineTempLabel(labelRetry);

    // The following instruction includes an acquire half barrier
    GetEmitter()->emitIns_R_R(insLd, dataSize, targetReg, addrReg);

    if (comparand->isContainedIntOrIImmed())
    {
        if (comparand->IsIntegralConst(0) && emitter::isLowRegister(targetReg))
        {
            // Compare-against-zero on a low register can use the single
            // compare-and-branch instruction cbnz instead of cmp + bne.
            GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelCompareFail, targetReg);
        }
        else
        {
            assert(comparand->AsIntConCommon()->IconValue() <= INT32_MAX);
            GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, targetReg,
                                      (target_ssize_t)comparand->AsIntConCommon()->IconValue());
            GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
        }
    }
    else
    {
        GetEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE, targetReg, comparandReg);
        GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
    }

    // The following instruction includes a release half barrier
    GetEmitter()->emitIns_R_R_R(insSt, dataSize, exResultReg, dataReg, addrReg);

    GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, exResultReg, 0);
    GetEmitter()->emitIns_J(INS_bne, labelRetry);

    genDefineTempLabel(labelCompareFail);

    instGen_MemoryBarrier();

    // The address register may now be considered dead for GC purposes.
    gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());

    // ldrexb/ldrexh zero-extend the loaded value; sign-extend the result
    // for small signed types so callers observe a properly-extended int.
    if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
    {
        instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
        GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
    }

    genProduceReg(treeNode);
}

//------------------------------------------------------------------------
// genGetInsForOper: Return instruction encoding of the operation tree.
//
Expand Down
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenarmarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -429,17 +429,17 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
}

#ifdef TARGET_ARM64
case GT_XCHG:
case GT_XORR:
case GT_XAND:
#endif // TARGET_ARM64
case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;

case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
#endif // TARGET_ARM64

#ifdef SWIFT_SUPPORT
case GT_SWIFT_ERROR:
Expand Down