Commit f0665b2

[CIR] Add sync scope to atomic load operations

1 parent 3fc7419 commit f0665b2

File tree

8 files changed: +152 -49 lines changed
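For context before the per-file diff: a scoped atomic load lets the program state which set of threads the memory ordering must synchronize with. A minimal caller, using the same builtins and scope macros exercised by the new test at the end of this commit:

// With -fclangir, this load is now emitted as a cir.load carrying
// syncscope(single_thread) in addition to atomic(relaxed).
void load_single_scope(int *ptr, int *out) {
  *out = __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
}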

clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h

Lines changed: 3 additions & 2 deletions

@@ -205,7 +205,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                             bool isVolatile = false, uint64_t alignment = 0) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false, isVolatile,
-                               alignmentAttr, cir::MemOrderAttr{});
+                               alignmentAttr, cir::SyncScopeKindAttr{},
+                               cir::MemOrderAttr{});
   }
 
   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr,
@@ -366,7 +367,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
     return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
                                /*isVolatile=*/false, alignmentAttr,
-                               /*mem_order=*/{});
+                               /*sync_scope=*/{}, /*mem_order=*/{});
   }
 
   cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,

clang/include/clang/CIR/Dialect/IR/CIROps.td

Lines changed: 11 additions & 5 deletions

@@ -446,6 +446,15 @@ def CIR_MemOrder : CIR_I32EnumAttr<
   I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
 ]>;
 
+//===----------------------------------------------------------------------===//
+// C/C++ sync scope definitions
+//===----------------------------------------------------------------------===//
+
+def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "sync scope kind", [
+  I32EnumAttrCase<"SingleThread", 0, "single_thread">,
+  I32EnumAttrCase<"System", 1, "system">
+]>;
+
 //===----------------------------------------------------------------------===//
 // AllocaOp
 //===----------------------------------------------------------------------===//
@@ -586,13 +595,15 @@ def CIR_LoadOp : CIR_Op<"load", [
                          UnitAttr:$isDeref,
                          UnitAttr:$is_volatile,
                          OptionalAttr<I64Attr>:$alignment,
+                         OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
                          OptionalAttr<CIR_MemOrder>:$mem_order);
   let results = (outs CIR_AnyType:$result);
 
   let assemblyFormat = [{
     (`deref` $isDeref^)?
     (`volatile` $is_volatile^)?
     (`align` `(` $alignment^ `)`)?
+    (`syncscope` `(` $sync_scope^ `)`)?
     (`atomic` `(` $mem_order^ `)`)?
     $addr `:` qualified(type($addr)) `,` type($result) attr-dict
   }];
@@ -5265,11 +5276,6 @@ def CIR_AtomicFetchKind : CIR_I32EnumAttr<
   I32EnumAttrCase<"Min", 7, "min">
 ]>;
 
-def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "sync scope kind", [
-  I32EnumAttrCase<"SingleThread", 0, "single_thread">,
-  I32EnumAttrCase<"System", 1, "system">
-]>;
-
 def CIR_AtomicFetchOp : CIR_Op<"atomic.fetch", [
   AllTypesMatch<["result", "val"]>,
   TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
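The enum definition above, moved next to CIR_MemOrder, is what the builder and codegen changes below consume as cir::SyncScopeKind. A sketch of the C++ interface mlir-tblgen derives from it (standard I32EnumAttr output; the helper names follow the usual generated pattern and are assumed, not verified against this commit):

#include <cstdint>

// Generated enum: the case values and assembly spellings come straight
// from the TableGen definition above.
enum class SyncScopeKind : uint32_t {
  SingleThread = 0, // assembly spelling "single_thread"
  System = 1,       // assembly spelling "system"
};
// Typical companion helpers generated alongside the enum:
//   llvm::StringRef stringifySyncScopeKind(SyncScopeKind);
//   std::optional<SyncScopeKind> symbolizeSyncScopeKind(llvm::StringRef);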

clang/include/clang/CIR/MissingFeatures.h

Lines changed: 1 addition & 0 deletions

@@ -180,6 +180,7 @@ struct MissingFeatures {
   static bool atomicInfoGetAtomicAddress() { return false; }
   static bool atomicScope() { return false; }
   static bool atomicSyncScopeID() { return false; }
+  static bool atomicMapTargetSyncScope() { return false; }
   static bool atomicTypes() { return false; }
   static bool atomicUseLibCall() { return false; }
   static bool atomicMicrosoftVolatile() { return false; }

clang/lib/CIR/CodeGen/CIRGenAtomic.cpp

Lines changed: 73 additions & 23 deletions

@@ -399,20 +399,14 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                          Address ptr, Address val1, Address val2,
                          Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
-                         cir::MemOrder order) {
-  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
-  if (scopeModel) {
-    assert(!cir::MissingFeatures::atomicScope());
-    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
-    return;
-  }
-
+                         cir::MemOrder order, cir::SyncScopeKind scope) {
   assert(!cir::MissingFeatures::atomicSyncScopeID());
   llvm::StringRef opName;
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Location loc = cgf.getLoc(expr->getSourceRange());
   auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
   cir::AtomicFetchKindAttr fetchAttr;
   bool fetchFirst = true;
@@ -446,13 +440,14 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
-  case AtomicExpr::AO__atomic_load: {
+  case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load: {
     cir::LoadOp load =
         builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
 
-    assert(!cir::MissingFeatures::atomicSyncScopeID());
-
     load->setAttr("mem_order", orderAttr);
+    load->setAttr("sync_scope", scopeAttr);
 
     builder.createStore(loc, load->getResult(0), dest);
     return;
@@ -586,8 +581,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
-  case AtomicExpr::AO__scoped_atomic_load_n:
-  case AtomicExpr::AO__scoped_atomic_load:
 
   case AtomicExpr::AO__opencl_atomic_store:
   case AtomicExpr::AO__hip_atomic_store:
@@ -671,6 +664,51 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   builder.createStore(loc, result, dest);
 }
 
+// Map clang sync scope to CIR sync scope.
+static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
+                                                SourceRange range,
+                                                clang::SyncScope scope) {
+  switch (scope) {
+  default: {
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
+    return cir::SyncScopeKind::System;
+  }
+
+  case clang::SyncScope::SingleScope:
+    return cir::SyncScopeKind::SingleThread;
+  case clang::SyncScope::SystemScope:
+    return cir::SyncScopeKind::System;
+  }
+}
+
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+                         Address ptr, Address val1, Address val2,
+                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
+                         cir::MemOrder order,
+                         const std::optional<Expr::EvalResult> &scopeConst,
+                         mlir::Value scopeValue) {
+  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+
+  if (!scopeModel) {
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, cir::SyncScopeKind::System);
+    return;
+  }
+
+  if (scopeConst.has_value()) {
+    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
+        cgf, expr->getScope()->getSourceRange(),
+        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, mappedScope);
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
+}
+
 static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
   if (!cir::isValidCIRAtomicOrderingCABI(order))
     return false;
@@ -688,7 +726,8 @@ static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
 static void emitAtomicExprWithDynamicMemOrder(
     CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
     Address ptr, Address val1, Address val2, Expr *isWeakExpr,
-    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad) {
+    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
+    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) {
   // The memory order is not known at compile-time. The atomic operations
   // can't handle runtime memory orders; the memory order must be hard coded.
   // Generate a "switch" statement that converts a runtime value into a
@@ -706,7 +745,7 @@ static void emitAtomicExprWithDynamicMemOrder(
     else
       emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
     emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                 size, actualOrder);
+                 size, actualOrder, scopeConst, scopeValue);
     builder.createBreak(loc);
     builder.setInsertionPointToEnd(switchBlock);
   };
@@ -773,10 +812,19 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
   uint64_t size = typeInfo.Width.getQuantity();
 
-  Expr::EvalResult orderConst;
-  mlir::Value order;
-  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
-    order = emitScalarExpr(e->getOrder());
+  // Emit the memory order operand, and try to evaluate it as a constant.
+  mlir::Value order = emitScalarExpr(e->getOrder());
+  std::optional<Expr::EvalResult> orderConst;
+  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
+    orderConst.emplace(std::move(eval));
+
+  // Emit the sync scope operand, and try to evaluate it as a constant.
+  mlir::Value scope =
+      e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
+  std::optional<Expr::EvalResult> scopeConst;
+  if (Expr::EvalResult eval;
+      e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
+    scopeConst.emplace(std::move(eval));
 
   bool shouldCastToIntPtrTy = true;
@@ -789,12 +837,14 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     llvm_unreachable("already handled above with emitAtomicInit");
 
   case AtomicExpr::AO__atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load_n:
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_test_and_set:
   case AtomicExpr::AO__atomic_clear:
     break;
 
   case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load:
     dest = emitPointerWithAlignment(e->getVal1());
     break;
@@ -927,18 +977,18 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
              e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
              e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
 
-  if (!order) {
+  if (orderConst.has_value()) {
     // We have evaluated the memory order as an integer constant in orderConst.
     // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
-    uint64_t ord = orderConst.Val.getInt().getZExtValue();
+    uint64_t ord = orderConst->Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                  size, static_cast<cir::MemOrder>(ord));
+                  size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
   } else {
     emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                       isWeakExpr, orderFailExpr, size, isStore,
-                                      isLoad);
+                                      isLoad, scopeConst, scope);
   }
 
   if (resultTy->isVoidType())
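One behavioral note on the changes above, made concrete: the new emitAtomicOp overload only maps scopes that evaluate to a compile-time constant; a scope supplied as a runtime value still reaches the errorNYI path. A hypothetical source case (not in this commit's tests) that would take that path:

// Hypothetical: 'scope' is not a constant expression, so scopeConst stays
// empty and codegen reports "emitAtomicOp: dynamic sync scope" as NYI.
void dynamic_scope(int *ptr, int *out, int scope) {
  __scoped_atomic_load(ptr, out, __ATOMIC_RELAXED, scope);
}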

clang/lib/CIR/CodeGen/CIRGenBuilder.h

Lines changed: 2 additions & 0 deletions

@@ -462,6 +462,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
     return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
                                isVolatile, /*alignment=*/align,
+                               /*sync_scope=*/cir::SyncScopeKindAttr{},
                                /*mem_order=*/cir::MemOrderAttr{});
   }
 
@@ -473,6 +474,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     mlir::IntegerAttr alignAttr = getAlignmentAttr(alignment);
     return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false,
                                /*isVolatile=*/false, alignAttr,
+                               /*sync_scope=*/cir::SyncScopeKindAttr{},
                                /*mem_order=*/cir::MemOrderAttr{});
   }
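These call sites, like the ones in CIRBaseBuilder.h above, pass a default-constructed (null) cir::SyncScopeKindAttr, which leaves the optional sync_scope attribute absent, so ordinary non-atomic loads print exactly as before. A condensed sketch of that call shape (the wrapper itself is illustrative, not part of the commit):

// Illustrative wrapper: a null SyncScopeKindAttr means the optional
// sync_scope attribute is simply omitted from the created cir.load.
static cir::LoadOp createPlainLoad(mlir::OpBuilder &b, mlir::Location loc,
                                   mlir::Value ptr, mlir::IntegerAttr align) {
  return cir::LoadOp::create(b, loc, ptr, /*isDeref=*/false,
                             /*isVolatile=*/false, align,
                             /*sync_scope=*/cir::SyncScopeKindAttr{},
                             /*mem_order=*/cir::MemOrderAttr{});
}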

clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp

Lines changed: 5 additions & 2 deletions

@@ -1643,12 +1643,15 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
 
   assert(!cir::MissingFeatures::lowerModeOptLevel());
 
-  // TODO: nontemporal, syncscope.
+  // TODO: nontemporal.
   assert(!cir::MissingFeatures::opLoadStoreNontemporal());
+  std::optional<llvm::StringRef> syncScope =
+      getLLVMSyncScope(op.getSyncScope());
   mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
       rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
       op.getIsVolatile(), /*isNonTemporal=*/false,
-      /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);
+      /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering,
+      syncScope.value_or(llvm::StringRef()));
 
   // Convert adapted result to its original type if needed.
   mlir::Value result =
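The lowering defers the actual scope mapping to getLLVMSyncScope, whose definition is not part of this excerpt. A hypothetical shape, inferred only from the call site above and the LLVM FileCheck lines in the test below (single_thread becomes LLVM's "singlethread" scope; system maps to the default scope, which prints no syncscope at all):

// Hypothetical sketch, not the commit's actual helper: inferred from the
// call site and the test's expected LLVM output.
static std::optional<llvm::StringRef>
getLLVMSyncScope(std::optional<cir::SyncScopeKind> scope) {
  if (!scope)
    return std::nullopt; // non-atomic load: no sync scope at all
  switch (*scope) {
  case cir::SyncScopeKind::SingleThread:
    return llvm::StringRef("singlethread");
  case cir::SyncScopeKind::System:
    return std::nullopt; // default scope; value_or("") keeps it empty
  }
  llvm_unreachable("unknown sync scope kind");
}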
Lines changed: 40 additions & 0 deletions

@@ -0,0 +1,40 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void scoped_atomic_load(int *ptr) {
+  // CIR-LABEL: @scoped_atomic_load
+  // LLVM-LABEL: @scoped_atomic_load
+  // OGCG-LABEL: @scoped_atomic_load
+
+  int x;
+  __scoped_atomic_load(ptr, &x, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(single_thread) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+
+  __scoped_atomic_load(ptr, &x, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+}
+
+void scoped_atomic_load_n(int *ptr) {
+  // CIR-LABEL: @scoped_atomic_load_n
+  // LLVM-LABEL: @scoped_atomic_load_n
+  // OGCG-LABEL: @scoped_atomic_load_n
+
+  int x;
+  x = __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(single_thread) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+
+  x = __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+}
