diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index f80c9faf708ef..f1e24a5215dc8 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4131,6 +4131,45 @@ def CIR_ThrowOp : CIR_Op<"throw"> {
 // Atomic operations
 //===----------------------------------------------------------------------===//
 
+def CIR_AtomicXchg : CIR_Op<"atomic.xchg", [
+  AllTypesMatch<["result", "val"]>,
+  TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
+                 "ptr", "val", "mlir::cast<cir::PointerType>($_self).getPointee()">
+]> {
+  let summary = "Atomic exchange";
+  let description = [{
+    C/C++ atomic exchange operation. This operation implements the C/C++
+    builtin functions `__atomic_exchange`, `__atomic_exchange_n`, and
+    `__c11_atomic_exchange`.
+
+    This operation takes two arguments: a pointer `ptr` and a value `val`.
+    The operation atomically replaces the value of the object pointed to by
+    `ptr` with `val`, and returns the original value of the object.
+
+    Example:
+
+    ```mlir
+    %res = cir.atomic.xchg seq_cst
+        %ptr, %val
+        : !cir.ptr<!u64i> -> !u64i
+    ```
+  }];
+
+  let results = (outs CIR_AnyType:$result);
+  let arguments = (ins
+    Arg<CIR_PointerType, "", [MemRead, MemWrite]>:$ptr,
+    CIR_AnyType:$val,
+    Arg<CIR_MemOrder, "memory order">:$mem_order,
+    UnitAttr:$is_volatile
+  );
+
+  let assemblyFormat = [{
+    $mem_order (`volatile` $is_volatile^)?
+    $ptr `,` $val
+    `:` qualified(type($ptr)) `->` type($result) attr-dict
+  }];
+}
+
 def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmpxchg", [
   AllTypesMatch<["old", "expected", "desired"]>
 ]> {
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 86ba0299af3cf..e943b0252bf4e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -341,6 +341,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   }
 
   assert(!cir::MissingFeatures::atomicSyncScopeID());
+  llvm::StringRef opName;
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Location loc = cgf.getLoc(expr->getSourceRange());
@@ -400,6 +401,12 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
     return;
   }
 
+  case AtomicExpr::AO__c11_atomic_exchange:
+  case AtomicExpr::AO__atomic_exchange_n:
+  case AtomicExpr::AO__atomic_exchange:
+    opName = cir::AtomicXchg::getOperationName();
+    break;
+
   case AtomicExpr::AO__opencl_atomic_init:
 
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
@@ -421,11 +428,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__scoped_atomic_store:
   case AtomicExpr::AO__scoped_atomic_store_n:
 
-  case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__hip_atomic_exchange:
   case AtomicExpr::AO__opencl_atomic_exchange:
-  case AtomicExpr::AO__atomic_exchange_n:
-  case AtomicExpr::AO__atomic_exchange:
   case AtomicExpr::AO__scoped_atomic_exchange_n:
   case AtomicExpr::AO__scoped_atomic_exchange:
 
@@ -503,8 +507,23 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__atomic_clear:
     cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
-    break;
+    return;
   }
+
+  assert(!opName.empty() && "expected operation name to build");
+  mlir::Value loadVal1 = builder.createLoad(loc, val1);
+
+  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
+  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
+  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
+                                          atomicOperands, atomicResTys);
+
+  rmwOp->setAttr("mem_order", orderAttr);
+  if (expr->isVolatile())
+    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+
+  mlir::Value result = rmwOp->getResult(0);
+  builder.createStore(loc, result, dest);
 }
 
 static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
@@ -572,6 +591,11 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     val1 = emitPointerWithAlignment(e->getVal1());
     break;
 
+  case AtomicExpr::AO__atomic_exchange:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    dest = emitPointerWithAlignment(e->getVal2());
+    break;
+
   case AtomicExpr::AO__atomic_compare_exchange:
   case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
@@ -590,7 +614,9 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     isWeakExpr = e->getWeak();
     break;
 
+  case AtomicExpr::AO__atomic_exchange_n:
   case AtomicExpr::AO__atomic_store_n:
+  case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__c11_atomic_store:
     val1 = emitValToTemp(*this, e->getVal1());
     break;
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 8e0991e8c0708..1865698838134 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -717,6 +717,17 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMAtomicXchgLowering::matchAndRewrite(
+    cir::AtomicXchg op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+  mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(adaptor.getMemOrder());
+  rewriter.replaceOpWithNewOp<mlir::LLVM::AtomicRMWOp>(
+      op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(),
+      llvmOrder);
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
     cir::BitClrsbOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 0eba2959c0ebc..76289c597a2b5 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -415,3 +415,102 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
   // OGCG-NEXT: %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
   // OGCG-NEXT: store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
 }
+
+void c11_atomic_exchange(_Atomic(int) *ptr, int value) {
+  // CIR-LABEL: @c11_atomic_exchange
+  // LLVM-LABEL: @c11_atomic_exchange
+  // OGCG-LABEL: @c11_atomic_exchange
+
+  __c11_atomic_exchange(ptr, value, __ATOMIC_RELAXED);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_CONSUME);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_ACQUIRE);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_RELEASE);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_ACQ_REL);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
+
+void atomic_exchange(int *ptr, int *value, int *old) {
+  // CIR-LABEL: @atomic_exchange
+  // LLVM-LABEL: @atomic_exchange
+  // OGCG-LABEL: @atomic_exchange
+
+  __atomic_exchange(ptr, value, old, __ATOMIC_RELAXED);
+  __atomic_exchange(ptr, value, old, __ATOMIC_CONSUME);
+  __atomic_exchange(ptr, value, old, __ATOMIC_ACQUIRE);
+  __atomic_exchange(ptr, value, old, __ATOMIC_RELEASE);
+  __atomic_exchange(ptr, value, old, __ATOMIC_ACQ_REL);
+  __atomic_exchange(ptr, value, old, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
+
+void atomic_exchange_n(int *ptr, int value) {
+  // CIR-LABEL: @atomic_exchange_n
+  // LLVM-LABEL: @atomic_exchange_n
+  // OGCG-LABEL: @atomic_exchange_n
+
+  __atomic_exchange_n(ptr, value, __ATOMIC_RELAXED);
+  __atomic_exchange_n(ptr, value, __ATOMIC_CONSUME);
+  __atomic_exchange_n(ptr, value, __ATOMIC_ACQUIRE);
+  __atomic_exchange_n(ptr, value, __ATOMIC_RELEASE);
+  __atomic_exchange_n(ptr, value, __ATOMIC_ACQ_REL);
+  __atomic_exchange_n(ptr, value, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
new file mode 100644
index 0000000000000..6ca5af2aac175
--- /dev/null
+++ b/clang/test/CIR/IR/atomic.cir
@@ -0,0 +1,21 @@
+// RUN: cir-opt %s | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+!u32i = !cir.int<u, 32>
+
+cir.func @atomic_xchg(%ptr: !cir.ptr<!s32i>, %val: !s32i) {
+  // CHECK-LABEL: @atomic_xchg
+  %0 = cir.atomic.xchg relaxed %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %1 = cir.atomic.xchg consume %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %2 = cir.atomic.xchg acquire %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %3 = cir.atomic.xchg release %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %4 = cir.atomic.xchg acq_rel %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %5 = cir.atomic.xchg seq_cst %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  cir.return
+}
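For readers less familiar with the builtins being routed through `cir.atomic.xchg`, here is a minimal standalone C sketch (not part of the patch; variable names are illustrative). Each of the three exchange flavors atomically stores a new value into the object and hands back the value that was there before, which is exactly the `atomicrmw xchg` semantics checked in the tests above.

```c
// Usage sketch for the three exchange builtins (illustrative, not from the
// patch). Compile with clang; __c11_atomic_exchange is a Clang builtin.
#include <stdio.h>

int main(void) {
  // __atomic_exchange_n: stores the new value and returns the old one.
  int counter = 5;
  int old = __atomic_exchange_n(&counter, 7, __ATOMIC_SEQ_CST);
  printf("old=%d new=%d\n", old, counter); // old=5 new=7

  // __atomic_exchange: generic form; the previous value is written through
  // the third pointer argument instead of being returned.
  int desired = 9, previous = 0;
  __atomic_exchange(&counter, &desired, &previous, __ATOMIC_SEQ_CST);
  printf("previous=%d new=%d\n", previous, counter); // previous=7 new=9

  // __c11_atomic_exchange: same operation on a C11 _Atomic object (the
  // builtin backing <stdatomic.h>'s atomic_exchange in Clang).
  _Atomic int acounter = 1;
  int prev = __c11_atomic_exchange(&acounter, 2, __ATOMIC_SEQ_CST);
  printf("prev=%d acounter=%d\n", prev, acounter); // prev=1 acounter=2
  return 0;
}
```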