diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 8edb796884b5c..b4b02e24f85cc 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -330,8 +330,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val,
                            mlir::Value dst, bool isVolatile = false,
                            mlir::IntegerAttr align = {},
+                           cir::SyncScopeKindAttr scope = {},
                            cir::MemOrderAttr order = {}) {
-    return cir::StoreOp::create(*this, loc, val, dst, isVolatile, align, order);
+    return cir::StoreOp::create(*this, loc, val, dst, isVolatile, align, scope,
+                                order);
   }
 
   /// Emit a load from an boolean flag variable.
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 635809afdf2cc..12c5e399f02da 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -649,11 +649,13 @@ def CIR_StoreOp : CIR_Op<"store", [
                        [MemWrite]>:$addr,
                        UnitAttr:$is_volatile,
                        OptionalAttr:$alignment,
+                       OptionalAttr:$sync_scope,
                        OptionalAttr:$mem_order);
 
   let assemblyFormat = [{
     (`volatile` $is_volatile^)?
     (`align` `(` $alignment^ `)`)?
+    (`syncscope` `(` $sync_scope^ `)`)?
     (`atomic` `(` $mem_order^ `)`)?
     $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))
   }];
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 700e5f401a18f..0b8cded35fee9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -455,13 +455,15 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__c11_atomic_store:
   case AtomicExpr::AO__atomic_store_n:
-  case AtomicExpr::AO__atomic_store: {
+  case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__scoped_atomic_store:
+  case AtomicExpr::AO__scoped_atomic_store_n: {
     cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
 
     assert(!cir::MissingFeatures::atomicSyncScopeID());
 
     builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
-                        /*align=*/mlir::IntegerAttr{}, orderAttr);
+                        /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
     return;
   }
 
@@ -584,8 +586,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_store:
   case AtomicExpr::AO__hip_atomic_store:
-  case AtomicExpr::AO__scoped_atomic_store:
-  case AtomicExpr::AO__scoped_atomic_store_n:
 
   case AtomicExpr::AO__hip_atomic_exchange:
   case AtomicExpr::AO__opencl_atomic_exchange:
@@ -849,6 +849,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     break;
 
   case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__scoped_atomic_store:
     val1 = emitPointerWithAlignment(e->getVal1());
     break;
 
@@ -912,6 +913,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   case AtomicExpr::AO__c11_atomic_fetch_xor:
   case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__scoped_atomic_store_n:
     val1 = emitValToTemp(*this, e->getVal1());
     break;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 6e8c5d369dbc5..a9f7fe1386fa0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -492,11 +492,12 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
                            bool isVolatile = false,
                            mlir::IntegerAttr align = {},
+                           cir::SyncScopeKindAttr scope = {},
                            cir::MemOrderAttr order = {}) {
     if (!align)
       align = getAlignmentAttr(dst.getAlignment());
     return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
-                                         align, order);
+                                         align, scope, order);
   }
 
   /// Create a cir.complex.real_ptr operation that derives a pointer to the real
diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
index 66469e208d7b0..f79a52e2fb9b3 100644
--- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp
@@ -143,6 +143,7 @@ DeletionKind cir::CopyOp::removeBlockingUses(
   cir::StoreOp::create(builder, getLoc(), reachingDefinition, getDst(),
                        /*isVolatile=*/false,
                        /*alignment=*/mlir::IntegerAttr{},
+                       /*sync_scope=*/cir::SyncScopeKindAttr(),
                        /*mem-order=*/cir::MemOrderAttr());
   return DeletionKind::Delete;
 }
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 8a3b0a1448d2e..be7724317b21c 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1684,13 +1684,16 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
   // Convert adapted value to its memory type if needed.
   mlir::Value value = emitToMemory(rewriter, dataLayout,
                                    op.getValue().getType(), adaptor.getValue());
-  // TODO: nontemporal, syncscope.
+  // TODO: nontemporal.
   assert(!cir::MissingFeatures::opLoadStoreNontemporal());
   assert(!cir::MissingFeatures::opLoadStoreTbaa());
+  std::optional<llvm::StringRef> syncScope =
+      getLLVMSyncScope(op.getSyncScope());
   mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
       rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
       op.getIsVolatile(),
-      /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
+      /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder,
+      syncScope.value_or(llvm::StringRef()));
   rewriter.replaceOp(op, storeOp);
   assert(!cir::MissingFeatures::opLoadStoreTbaa());
   return mlir::LogicalResult::success();
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
index 04989589bee26..5b8c868d6c9d6 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -38,3 +38,35 @@ void scoped_atomic_load_n(int *ptr) {
   // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
   // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
 }
+
+void scoped_atomic_store(int *ptr, int value) {
+  // CIR-LABEL: @scoped_atomic_store
+  // LLVM-LABEL: @scoped_atomic_store
+  // OGCG-LABEL: @scoped_atomic_store
+
+  __scoped_atomic_store(ptr, &value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: cir.store align(4) syncscope(single_thread) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+
+  __scoped_atomic_store(ptr, &value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+  // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+}
+
+void scoped_atomic_store_n(int *ptr, int value) {
+  // CIR-LABEL: @scoped_atomic_store_n
+  // LLVM-LABEL: @scoped_atomic_store_n
+  // OGCG-LABEL: @scoped_atomic_store_n
+
+  __scoped_atomic_store_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: cir.store align(4) syncscope(single_thread) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+
+  __scoped_atomic_store_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+  // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+}
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 64e0961fe20d9..64b4cd7738d50 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -165,9 +165,9 @@ void store(int *ptr, int x) {
 }
 
 // CIR-LABEL: @store
-// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
 // CIR: }
 
 // LLVM-LABEL: @store
@@ -189,9 +189,9 @@ void store_n(int *ptr, int x) {
 }
 
 // CIR-LABEL: @store_n
-// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
 // CIR: }
 
 // LLVM-LABEL: @store_n
@@ -213,9 +213,9 @@ void c11_store(_Atomic(int) *ptr, int x) {
 }
 
 // CIR-LABEL: @c11_store
-// CIR: cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
-// CIR: cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store align(4) syncscope(system) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
 // CIR: }
 
 // LLVM-LABEL: @c11_store
@@ -1222,17 +1222,17 @@ void atomic_store_dynamic_order(int *ptr, int order) {
   // CIR: cir.switch (%[[ORDER]] : !s32i) {
   // CIR-NEXT: cir.case(default, []) {
   // CIR-NEXT: %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: cir.store align(4) atomic(relaxed) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT: cir.store align(4) syncscope(system) atomic(relaxed) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<3> : !s32i]) {
   // CIR-NEXT: %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: cir.store align(4) atomic(release) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT: cir.store align(4) syncscope(system) atomic(release) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
   // CIR-NEXT: %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: cir.store align(4) atomic(seq_cst) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT: cir.store align(4) syncscope(system) atomic(seq_cst) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.yield