diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 28748a1dfc52a0..a70fc821d4c6bf 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -14694,6 +14694,8 @@ of the two arguments. -0.0 is considered to be less than +0.0 for this
 intrinsic. Note that these are the semantics specified in the draft of
 IEEE 754-2018.
 
+.. _int_copysign:
+
 '``llvm.copysign.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -18866,6 +18868,55 @@ Examples:
 
      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
+.. _int_vp_copysign:
+
+'``llvm.vp.copysign.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x float>  @llvm.vp.copysign.v16f32 (<16 x float> <left_op>, <16 x float> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+      declare <vscale x 4 x float>  @llvm.vp.copysign.nxv4f32 (<vscale x 4 x float> <left_op>, <vscale x 4 x float> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+      declare <256 x double>  @llvm.vp.copysign.v256f64 (<256 x double> <left_op>, <256 x double> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated floating-point copysign of two vectors of floating-point values.
+
+
+Arguments:
+""""""""""
+
+The first two operands and the result have the same vector of floating-point type. The
+third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.copysign``' intrinsic performs floating-point copysign (:ref:`copysign <int_copysign>`)
+of the first and second vector operand on each enabled lane. The result on
+disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is
+performed in the default floating-point environment.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %mag, <4 x float> %sign, <4 x i1> %mask, i32 %evl)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
+      %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison
+
+
 .. _int_vp_minnum:
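
The LangRef example above uses a fixed-width vector; the same lane-wise contract applies to the scalable overloads. A minimal sketch under the semantics just stated (illustrative only, not part of the patch; the value names are hypothetical):

    ; Disabled lanes (mask off, or lane index >= %evl) yield poison.
    %r = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %mag, <vscale x 4 x float> %sign, <vscale x 4 x i1> %mask, i32 %evl)

    ; Lane-wise equivalent for all lanes below %evl:
    %t = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %mag, <vscale x 4 x float> %sign)
    %also.r = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %t, <vscale x 4 x float> poison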
 
 '``llvm.vp.minnum.*``' Intrinsics
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index f52349fd76807e..cad7485dfe71e4 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1591,6 +1591,11 @@ let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
                                LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
+  def int_vp_copysign : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_ceil : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 6e40d7d79bd635..fc1b4aa7ed286c 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -253,6 +253,10 @@ BEGIN_REGISTER_VP(vp_fmuladd, 3, 4, VP_FMULADD, -1)
 VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fmuladd)
 END_REGISTER_VP(vp_fmuladd, VP_FMULADD)
 
+// llvm.vp.copysign(x,y,mask,vlen)
+BEGIN_REGISTER_VP(vp_copysign, 2, 3, VP_FCOPYSIGN, -1)
+END_REGISTER_VP(vp_copysign, VP_FCOPYSIGN)
+
 // llvm.vp.minnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1)
 END_REGISTER_VP(vp_minnum, VP_FMINNUM)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 6d3562f07c90a5..545e9e99878f98 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1104,6 +1104,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::USHLSAT:
   case ISD::ROTL:
   case ISD::ROTR:
+  case ISD::VP_FCOPYSIGN:
     SplitVecRes_BinOp(N, Lo, Hi);
     break;
   case ISD::FMA: case ISD::VP_FMA:
@@ -3962,6 +3963,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::VP_FMUL:
   case ISD::VP_FDIV:
   case ISD::VP_FREM:
+  case ISD::VP_FCOPYSIGN:
     Res = WidenVecRes_Binary(N);
     break;
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 67e110252d95c9..57b2ff6fc72ff7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -452,7 +452,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         ISD::VP_SETCC,       ISD::VP_FP_ROUND,    ISD::VP_FP_EXTEND,
         ISD::VP_SQRT,        ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,
         ISD::VP_FCEIL,       ISD::VP_FFLOOR,      ISD::VP_FROUND,
-        ISD::VP_FROUNDEVEN};
+        ISD::VP_FROUNDEVEN,  ISD::VP_FCOPYSIGN};
 
     static const unsigned IntegerVecReduceOps[] = {
         ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
@@ -3936,6 +3936,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerVPOp(Op, DAG, RISCVISD::FMINNUM_VL, /*HasMergeOp*/ true);
   case ISD::VP_FMAXNUM:
     return lowerVPOp(Op, DAG, RISCVISD::FMAXNUM_VL, /*HasMergeOp*/ true);
+  case ISD::VP_FCOPYSIGN:
+    return lowerVPOp(Op, DAG, RISCVISD::FCOPYSIGN_VL, /*HasMergeOp*/ true);
   case ISD::VP_SIGN_EXTEND:
   case ISD::VP_ZERO_EXTEND:
     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
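
The two LegalizeVectorTypes.cpp hooks above cover illegal result types: SplitVecRes_BinOp halves a too-wide vector, and WidenVecRes_Binary pads a non-power-of-two one. A hedged sketch of the widening case (a hypothetical function, not one of the patch's tests; assumes <6 x float> is not a legal type for the target):

    declare <6 x float> @llvm.vp.copysign.v6f32(<6 x float>, <6 x float>, <6 x i1>, i32)

    ; <6 x float> is widened to <8 x float> during type legalization; the new
    ; ISD::VP_FCOPYSIGN entry routes it through WidenVecRes_Binary.
    define <6 x float> @widen_sketch(<6 x float> %mag, <6 x float> %sign, <6 x i1> %m, i32 zeroext %evl) {
      %v = call <6 x float> @llvm.vp.copysign.v6f32(<6 x float> %mag, <6 x float> %sign, <6 x i1> %m, i32 %evl)
      ret <6 x float> %v
    }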
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
new file mode 100644
index 00000000000000..70ef41bd152c4b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -0,0 +1,414 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.vp.copysign.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
+
+define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x half> %v
+}
+
+define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x half> %v
+}
+
+declare <4 x half> @llvm.vp.copysign.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
+
+define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <8 x half> @llvm.vp.copysign.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
+
+define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+declare <16 x half> @llvm.vp.copysign.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
+
+define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x half> %v
+}
+
+define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x half> %v
+}
+
+declare <2 x float> @llvm.vp.copysign.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
+
+define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x float> %v
+}
+
+define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x float> %v
+}
+
+declare <4 x float> @llvm.vp.copysign.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
+
+define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+declare <16 x float> @llvm.vp.copysign.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)
+
+define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x float> %v
+}
+
+define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x float> %v
+}
+
+declare <2 x double> @llvm.vp.copysign.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)
+
+define <2 x double> @vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %v
+}
+
+define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %v
+}
+
+declare <4 x double> @llvm.vp.copysign.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)
+
+define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <8 x double> @llvm.vp.copysign.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
+
+define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v8f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+declare <15 x double> @llvm.vp.copysign.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32)
+
+define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v15f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
+define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v15f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <15 x i1> poison, i1 true, i32 0
+  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
+declare <16 x double> @llvm.vp.copysign.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)
+
+define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x double> %v
+}
+
+define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v16f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  ret <16 x double> %v
+}
+
+declare <32 x double> @llvm.vp.copysign.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)
+
+define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a3, 24
+; CHECK-NEXT:    mul a1, a1, a3
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    addi a1, a0, 128
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v24, (a1)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    addi a3, a2, -16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    bltu a2, a3, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8, v0.t
+; CHECK-NEXT:    bltu a2, a0, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a2, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v1
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 24
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
+
+define <32 x double> @vfsgnj_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_v32f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, 128
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v24, (a1)
+; CHECK-NEXT:    addi a3, a2, -16
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    bltu a2, a3, .LBB27_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:    vle64.v v0, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v24
+; CHECK-NEXT:    bltu a2, a0, .LBB27_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a2, 16
+; CHECK-NEXT:  .LBB27_4:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v0
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
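
The *_unmasked tests above and below rely on the standard idiom for an all-true mask, since VP intrinsics have no unmasked form. In isolation (a sketch; %head and %allones are illustrative names):

    ; Insert i1 true into lane 0, then broadcast it to every lane.
    %head = insertelement <4 x i1> poison, i1 true, i32 0
    %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
    ; With a known all-true mask the RISC-V lowering drops the v0.t predicate,
    ; which is why the *_unmasked CHECK lines show a bare vfsgnj.vv.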
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
new file mode 100644
index 00000000000000..5dc2ed5ca619ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @vfsgnj_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x half> %v
+}
+
+define <vscale x 1 x half> @vfsgnj_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vfsgnj_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vfsgnj_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @vfsgnj_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x half> %v
+}
+
+define <vscale x 4 x half> @vfsgnj_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x half> %v
+}
+
+declare <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @vfsgnj_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 8 x half> @vfsgnj_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+declare <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x half> @vfsgnj_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x half> %v
+}
+
+define <vscale x 16 x half> @vfsgnj_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv16f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x half> %v
+}
+
+declare <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv32f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+declare <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @vfsgnj_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x float> %v
+}
+
+define <vscale x 1 x float> @vfsgnj_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vfsgnj_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vfsgnj_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @vfsgnj_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x float> %v
+}
+
+define <vscale x 4 x float> @vfsgnj_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x float> %v
+}
+
+declare <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @vfsgnj_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vfsgnj_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+declare <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x float> @vfsgnj_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x float> %v
+}
+
+define <vscale x 16 x float> @vfsgnj_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv16f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x float> %v
+}
+
+declare <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @vfsgnj_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @vfsgnj_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv1f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vfsgnj_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vfsgnj_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @vfsgnj_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x double> %v
+}
+
+define <vscale x 4 x double> @vfsgnj_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x double> %v
+}
+
+declare <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x double> @vfsgnj_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vfsgnj_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, i32 zeroext %evl) {
+; CHECK-LABEL: vfsgnj_vv_nxv8f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index bf0fb5e77f9fd3..be3a315ca53ff0 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -46,8 +46,8 @@ class VPIntrinsicTest : public testing::Test {
       Str << " declare <8 x i32> @llvm.vp." << BinaryIntOpcode
           << ".v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) ";
 
-    const char *BinaryFPOpcodes[] = {"fadd", "fsub", "fmul", "fdiv",
-                                     "frem", "minnum", "maxnum"};
+    const char *BinaryFPOpcodes[] = {"fadd",   "fsub",   "fmul",   "fdiv",
+                                     "frem",   "minnum", "maxnum", "copysign"};
     for (const char *BinaryFPOpcode : BinaryFPOpcodes)
       Str << " declare <8 x float> @llvm.vp." << BinaryFPOpcode
           << ".v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) ";
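
With "copysign" appended to BinaryFPOpcodes, the harness above now also emits and round-trips the following declaration, so vp.copysign is covered by the generic VPIntrinsic unit tests:

    declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)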