diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5488d3cb12f5f..e7f1c59eea4ed 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11452,6 +11452,9 @@ static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
     }
   }
 
+  if (!Imm)
+    return SDValue();
+
   SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
   return DAG.getNode(AArch64ISD::SETCC_PRED, DL, VT, Pred, N->getOperand(2),
                      Splat, DAG.getCondCode(CC));
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
index 824ce5a1ef608..eb885035307ee 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
@@ -940,6 +940,30 @@ define <vscale x 2 x i1> @cmpne_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
   ret <vscale x 2 x i1> %out
 }
 
+
+define <vscale x 16 x i1> @cmpgt_wide_splat_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, i64 %b) {
+; CHECK-LABEL: cmpgt_wide_splat_b:
+; CHECK: cmpgt p0.b, p0/z, z0.b, z1.d
+; CHECK-NEXT: ret
+  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 4 x i1> @cmpls_wide_splat_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, i64 %b) {
+; CHECK-LABEL: cmpls_wide_splat_s:
+; CHECK: cmpls p0.s, p0/z, z0.s, z1.d
+; CHECK-NEXT: ret
+  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
+
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -1003,3 +1027,5 @@ declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)