diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index df8eff2812cf2..7c57d6ca1aec0 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1178,6 +1178,10 @@ static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
 
 static std::optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC,
                                                               IntrinsicInst &II) {
+  // Bail due to missing support for ISD::STRICT_ scalable vector operations.
+  if (II.isStrictFP())
+    return std::nullopt;
+
   auto *OpPredicate = II.getOperand(0);
   auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
   if (BinOpCode == Instruction::BinaryOpsEnd ||
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll
index 0272ffb3e2d54..91b47410e8cb3 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll
@@ -8,8 +8,9 @@ target triple = "aarch64-unknown-linux-gnu"
 define <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @replace_fadd_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[TMP1:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
   %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
@@ -21,8 +22,9 @@ define <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x
 define <vscale x 2 x double> @call_replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @call_replace_fadd_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = call <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
   ret <vscale x 2 x double> %1
@@ -33,8 +35,9 @@ define <vscale x 2 x double> @call_replace_fadd_intrinsic_double_strictfp(<vsca
 define <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @replace_fmul_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
   %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
@@ -46,8 +49,9 @@ define <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x
 define <vscale x 2 x double> @call_replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @call_replace_fmul_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = call <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
   ret <vscale x 2 x double> %1
@@ -58,8 +62,9 @@ define <vscale x 2 x double> @call_replace_fmul_intrinsic_double_strictfp(<vsca
 define <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @replace_fsub_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[TMP1:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
   %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
@@ -71,8 +76,9 @@ define <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x
 define <vscale x 2 x double> @call_replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK: Function Attrs: strictfp
 ; CHECK-LABEL: @call_replace_fsub_intrinsic_double_strictfp(
-; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3]]
-; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 ;
   %1 = call <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
   ret <vscale x 2 x double> %1
@@ -89,6 +95,5 @@ attributes #1 = { strictfp }
 ;.
 ; CHECK: attributes #[[ATTR0:[0-9]+]] = { strictfp "target-features"="+sve" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
-; CHECK: attributes #[[ATTR3]] = { strictfp }
+; CHECK: attributes #[[ATTR2]] = { strictfp }
;.
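
Note on the functional change: instCombineSVEVectorBinOp() previously rewrote an SVE binop intrinsic whose predicate is all-active into the plain IR instruction (fadd/fsub/fmul). As the old CHECK lines show, once such a function is inlined into a strictfp caller the plain instruction is turned into an llvm.experimental.constrained.* call, which codegen would then have to lower as an ISD::STRICT_* node on a scalable vector type, and that lowering does not exist yet. Hence the early bail-out. Below is a minimal standalone sketch of the guard pattern, compilable against the LLVM headers; the helper name is hypothetical and only CallBase::isStrictFP() and the bail-out itself are taken from the patch.

// Sketch only, not the upstream sources.
#include <optional>

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Returning std::nullopt tells InstCombine that no rewrite was performed, so
// a constrained-FP call survives untouched and codegen never has to lower an
// ISD::STRICT_* node for a scalable vector type.
static std::optional<Instruction *> trySimplifySVEBinOp(IntrinsicInst &II) {
  // Bail for strictfp calls: scalable-vector STRICT_ lowering is missing.
  if (II.isStrictFP())
    return std::nullopt;
  // Non-strict calls with an all-active predicate could still be rewritten
  // to a plain fadd/fsub/fmul here, as before this patch.
  return std::nullopt;
}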