diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 4def17cd38e7..ff2a6d626bca 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -6063,6 +6063,68 @@ def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [
   let hasVerifier = 1;
 }
 
+def CIR_AtomicTestAndSetOp : CIR_Op<"atomic.test_and_set"> {
+  let summary = "Atomic test and set";
+  let description = [{
+    C/C++ atomic test and set operation. Implements the builtin function
+    `__atomic_test_and_set`.
+
+    The operation takes as its only operand a pointer to an 8-bit signed
+    integer. The operation atomically sets the integer to an implementation-
+    defined non-zero "set" value. The result of the operation is a boolean value
+    indicating whether the previous value of the integer was non-zero ("set").
+
+    Example:
+    ```mlir
+    %res = cir.atomic.test_and_set seq_cst %ptr : !cir.ptr<!s8i> -> !cir.bool
+    ```
+  }];
+
+  let arguments = (ins
+    Arg<CIR_PtrToType<CIR_SInt8>, "", [MemRead, MemWrite]>:$ptr,
+    Arg<CIR_MemOrder, "memory order">:$mem_order,
+    OptionalAttr<CIR_MemScopeKind>:$syncscope,
+    OptionalAttr<I64Attr>:$alignment,
+    UnitAttr:$is_volatile);
+
+  let results = (outs CIR_BoolType:$result);
+
+  let assemblyFormat = [{
+    $mem_order $ptr
+    (`volatile` $is_volatile^)?
+    `:` qualified(type($ptr)) `->` qualified(type($result)) attr-dict
+  }];
+}
+
+def CIR_AtomicClearOp : CIR_Op<"atomic.clear"> {
+  let summary = "Atomic clear";
+  let description = [{
+    C/C++ atomic clear operation. Implements the builtin function
+    `__atomic_clear`.
+
+    The operation takes as its only operand a pointer to an 8-bit signed
+    integer. The operation atomically sets the integer to zero.
+
+    Example:
+    ```mlir
+    cir.atomic.clear seq_cst %ptr : !cir.ptr<!s8i>
+    ```
+  }];
+
+  let arguments = (ins
+    Arg<CIR_PtrToType<CIR_SInt8>, "", [MemRead, MemWrite]>:$ptr,
+    Arg<CIR_MemOrder, "memory order">:$mem_order,
+    OptionalAttr<CIR_MemScopeKind>:$syncscope,
+    OptionalAttr<I64Attr>:$alignment,
+    UnitAttr:$is_volatile);
+
+  let assemblyFormat = [{
+    $mem_order $ptr
+    (`volatile` $is_volatile^)?
+    `:` qualified(type($ptr)) attr-dict
+  }];
+}
+
 def CIR_AtomicFence : CIR_Op<"atomic.fence"> {
   let summary = "Atomic thread fence";
   let description = [{
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index be1995ac5aa5..20bde84d1395 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -746,11 +746,22 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
                          cir::AtomicFetchKind::Nand);
     break;
   case AtomicExpr::AO__atomic_test_and_set: {
-    llvm_unreachable("NYI");
+    auto op = cir::AtomicTestAndSetOp::create(
+        builder, loc, Ptr.getPointer(), Order,
+        cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
+        builder.getI64IntegerAttr(Ptr.getAlignment().getQuantity()),
+        E->isVolatile());
+    builder.createStore(loc, op, Dest);
+    return;
   }
 
   case AtomicExpr::AO__atomic_clear: {
-    llvm_unreachable("NYI");
+    cir::AtomicClearOp::create(
+        builder, loc, Ptr.getPointer(), Order,
+        cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
+        builder.getI64IntegerAttr(Ptr.getAlignment().getQuantity()),
+        E->isVolatile());
+    return;
   }
 
   }
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index ce2564d6e031..404e0f52b0f4 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -3647,6 +3647,47 @@ mlir::LogicalResult CIRToLLVMAtomicFetchLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMAtomicTestAndSetOpLowering::matchAndRewrite(
+    cir::AtomicTestAndSetOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::LLVM::AtomicOrdering llvmOrder = getLLVMAtomicOrder(op.getMemOrder());
+  llvm::StringRef llvmSyncScope =
+      getLLVMSyncScope(adaptor.getSyncscope()).value_or(StringRef());
+
+  auto one = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+                                            rewriter.getI8Type(), 1);
+  auto rmw = mlir::LLVM::AtomicRMWOp::create(
+      rewriter, op.getLoc(), mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(),
+      one, llvmOrder, llvmSyncScope, adaptor.getAlignment().value_or(0),
+      op.getIsVolatile());
+
+  // __atomic_test_and_set returns true iff the byte was already "set".
+  // Compare the previous value against zero, not against the "set" value:
+  // any non-zero previous value counts as set, matching classic codegen.
+  auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+                                             rewriter.getI8Type(), 0);
+  auto cmp = mlir::LLVM::ICmpOp::create(
+      rewriter, op.getLoc(), mlir::LLVM::ICmpPredicate::ne, rmw, zero);
+
+  rewriter.replaceOp(op, cmp);
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMAtomicClearOpLowering::matchAndRewrite(
+    cir::AtomicClearOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  // FIXME: add syncscope.
+  mlir::LLVM::AtomicOrdering llvmOrder = getLLVMAtomicOrder(op.getMemOrder());
+  auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+                                             rewriter.getI8Type(), 0);
+  auto store = mlir::LLVM::StoreOp::create(
+      rewriter, op.getLoc(), zero, adaptor.getPtr(),
+      adaptor.getAlignment().value_or(0), op.getIsVolatile(),
+      /*isNonTemporal=*/false, /*isInvariantGroup=*/false, llvmOrder);
+  rewriter.replaceOp(op, store);
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRToLLVMAtomicFenceLowering::matchAndRewrite(
     cir::AtomicFence op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
@@ -4603,8 +4644,10 @@ void populateCIRToLLVMConversionPatterns(
       CIRToLLVMAssumeAlignedOpLowering,
       CIRToLLVMAssumeOpLowering,
       CIRToLLVMAssumeSepStorageOpLowering,
+      CIRToLLVMAtomicClearOpLowering,
       CIRToLLVMAtomicCmpXchgLowering,
       CIRToLLVMAtomicFetchLowering,
+      CIRToLLVMAtomicTestAndSetOpLowering,
       CIRToLLVMAtomicXchgLowering,
       CIRToLLVMAtomicFenceLowering,
       CIRToLLVMBaseClassAddrOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index f13c37106665..4378da64ec4a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -936,6 +936,26 @@ class CIRToLLVMAtomicFetchLowering
                   mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMAtomicTestAndSetOpLowering
+    : public mlir::OpConversionPattern<cir::AtomicTestAndSetOp> {
+public:
+  using mlir::OpConversionPattern<cir::AtomicTestAndSetOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::AtomicTestAndSetOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMAtomicClearOpLowering
+    : public mlir::OpConversionPattern<cir::AtomicClearOp> {
+public:
+  using mlir::OpConversionPattern<cir::AtomicClearOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::AtomicClearOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 class CIRToLLVMAtomicFenceLowering
     : public mlir::OpConversionPattern<cir::AtomicFence> {
 public:
diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp
index 3575bbf9e713..d033d2a45e07 100644
--- a/clang/test/CIR/CodeGen/atomic.cpp
+++ b/clang/test/CIR/CodeGen/atomic.cpp
@@ -1156,3 +1156,43 @@ extern "C" void test_op_and_fetch(void)
   // LLVM: store i64 [[RET7]], ptr @ull, align 8
   ull = __sync_nand_and_fetch(&ull, uc);
 }
+
+// CHECK-LABEL: @_Z12test_and_setPvPVv
+// LLVM-LABEL: @_Z12test_and_setPvPVv
+void test_and_set(void *p, volatile void *vp) {
+  bool x = __atomic_test_and_set(p, __ATOMIC_SEQ_CST);
+  // CHECK: %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
+  // CHECK-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
+  // CHECK: %{{.+}} = cir.atomic.test_and_set seq_cst %[[PTR]] : !cir.ptr<!s8i> -> !cir.bool
+
+  // LLVM: %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // LLVM-NEXT: %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i8 1 seq_cst, align 1
+  // LLVM-NEXT: %{{.+}} = icmp ne i8 %[[RES]], 0
+
+  bool y = __atomic_test_and_set(vp, __ATOMIC_SEQ_CST);
+  // CHECK: %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
+  // CHECK-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
+  // CHECK: %{{.+}} = cir.atomic.test_and_set seq_cst %[[PTR]] volatile : !cir.ptr<!s8i> -> !cir.bool
+
+  // LLVM: %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // LLVM-NEXT: %[[RES:.+]] = atomicrmw volatile xchg ptr %[[PTR]], i8 1 seq_cst, align 1
+  // LLVM-NEXT: %{{.+}} = icmp ne i8 %[[RES]], 0
+}
+
+// CHECK-LABEL: @_Z5clearPvPVv
+// LLVM-LABEL: @_Z5clearPvPVv
+void clear(void *p, volatile void *vp) {
+  __atomic_clear(p, __ATOMIC_SEQ_CST);
+  // CHECK: %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
+  // CHECK-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
+  // CHECK: cir.atomic.clear seq_cst %[[PTR]] : !cir.ptr<!s8i>
+
+  // LLVM: store atomic i8 0, ptr %{{.+}} seq_cst, align 1
+
+  __atomic_clear(vp, __ATOMIC_SEQ_CST);
+  // CHECK: %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
+  // CHECK-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
+  // CHECK: cir.atomic.clear seq_cst %[[PTR]] volatile : !cir.ptr<!s8i>
+
+  // LLVM: store atomic volatile i8 0, ptr %{{.+}} seq_cst, align 1
+}