diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index aa6551eb43fa6..e80745781f566 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -315,6 +315,16 @@ def LLVM_NoAliasScopeDeclOp
   let assemblyFormat = "$scope attr-dict";
 }
 
+def LLVM_PtrMaskOp
+    : LLVM_OneResultIntrOp<"ptrmask", [], [0, 1],
+                           [Pure, AllTypesMatch<["ptr", "res"]>]> {
+  let arguments = (ins LLVM_ScalarOrVectorOf<LLVM_AnyPointer>:$ptr,
+                       LLVM_ScalarOrVectorOf<AnySignlessInteger>:$mask);
+  let results = (outs LLVM_ScalarOrVectorOf<LLVM_AnyPointer>:$res);
+
+  let assemblyFormat = "$ptr `,` $mask attr-dict `:` functional-type(operands, results)";
+}
+
 //
 // Memory marker intrinsics.
 //
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index d14fc8a5942ca..4676f7d63925b 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -994,6 +994,20 @@ define float @ssa_copy(float %0) {
   ret float %2
 }
 
+; CHECK-LABEL: llvm.func @ptrmask
+define ptr @ptrmask(ptr %0, i64 %1) {
+  ; CHECK: %{{.*}} = llvm.intr.ptrmask %{{.*}} : (!llvm.ptr, i64) -> !llvm.ptr
+  %3 = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 %1)
+  ret ptr %3
+}
+
+; CHECK-LABEL: llvm.func @vector_ptrmask
+define <8 x ptr> @vector_ptrmask(<8 x ptr> %0, <8 x i64> %1) {
+  ; CHECK: %{{.*}} = llvm.intr.ptrmask %{{.*}} : (!llvm.vec<8 x ptr>, vector<8xi64>) -> !llvm.vec<8 x ptr>
+  %3 = call <8 x ptr> @llvm.ptrmask.v8p0.v8i64(<8 x ptr> %0, <8 x i64> %1)
+  ret <8 x ptr> %3
+}
+
 ; CHECK-LABEL: experimental_constrained_fptrunc
 define void @experimental_constrained_fptrunc(double %s, <4 x double> %v) {
 ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} towardzero ignore : f64 to f32
@@ -1247,6 +1261,8 @@
 declare ptr @llvm.strip.invariant.group.p0(ptr nocapture)
 declare void @llvm.assume(i1)
 declare float @llvm.ssa.copy.f32(float returned)
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
+declare <8 x ptr> @llvm.ptrmask.v8p0.v8i64(<8 x ptr>, <8 x i64>)
 declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
 declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float>, i64)
 declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 0b47163cc51d3..a027ad8b9f2ec 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -1079,6 +1079,20 @@ llvm.func @ssa_copy(%arg: f32) -> f32 {
   llvm.return %0 : f32
 }
 
+// CHECK-LABEL: @ptrmask
+llvm.func @ptrmask(%p: !llvm.ptr, %mask: i64) -> !llvm.ptr {
+  // CHECK: call ptr @llvm.ptrmask.p0.i64
+  %0 = llvm.intr.ptrmask %p, %mask : (!llvm.ptr, i64) -> !llvm.ptr
+  llvm.return %0 : !llvm.ptr
+}
+
+// CHECK-LABEL: @vector_ptrmask
+llvm.func @vector_ptrmask(%p: !llvm.vec<8 x ptr>, %mask: vector<8 x i64>) -> !llvm.vec<8 x ptr> {
+  // CHECK: call <8 x ptr> @llvm.ptrmask.v8p0.v8i64
+  %0 = llvm.intr.ptrmask %p, %mask : (!llvm.vec<8 x ptr>, vector<8 x i64>) -> !llvm.vec<8 x ptr>
+  llvm.return %0 : !llvm.vec<8 x ptr>
+}
+
 // CHECK-LABEL: @experimental_constrained_fptrunc
 llvm.func @experimental_constrained_fptrunc(%s: f64, %v: vector<4xf32>) {
   // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
@@ -1272,6 +1286,8 @@ llvm.func @experimental_constrained_fptrunc(%s: f64, %v: vector<4xf32>) {
 
 // CHECK-DAG: declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr captures(none))
 // CHECK-DAG: declare float @llvm.ssa.copy.f32(float returned)
+// CHECK-DAG: declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
+// CHECK-DAG: declare <8 x ptr> @llvm.ptrmask.v8p0.v8i64(<8 x ptr>, <8 x i64>)
// CHECK-DAG: declare ptr @llvm.stacksave.p0()
 // CHECK-DAG: declare ptr addrspace(1) @llvm.stacksave.p1()
 // CHECK-DAG: declare void @llvm.stackrestore.p0(ptr)
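For reference, a minimal sketch (not part of the patch) of how the new op composes in practice: aligning a pointer down to a 64-byte boundary by clearing its low six bits. The function name and constant are illustrative only; the syntax follows the `assemblyFormat` defined above.

```mlir
llvm.func @align_down_64(%p: !llvm.ptr) -> !llvm.ptr {
  // -64 is ~63: the mask clears the low 6 bits of the pointer's
  // integral value, rounding it down to a 64-byte boundary.
  %mask = llvm.mlir.constant(-64 : i64) : i64
  // Lowers to `call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -64)`.
  %q = llvm.intr.ptrmask %p, %mask : (!llvm.ptr, i64) -> !llvm.ptr
  llvm.return %q : !llvm.ptr
}
```

Unlike an explicit `ptrtoint`/`and`/`inttoptr` sequence, `llvm.ptrmask` keeps the result tied to the original pointer's provenance, which is friendlier to LLVM's alias analysis.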