614 changes: 614 additions & 0 deletions clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c

Large diffs are not rendered by default.

366 changes: 366 additions & 0 deletions clang/test/CodeGen/arm-mve-intrinsics/vqmovn.c
@@ -0,0 +1,366 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>

// CHECK-LABEL: @test_vqmovnbq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vqmovnbq_s16(int8x16_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovnbq(a, b);
#else /* POLYMORPHIC */
return vqmovnbq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vqmovnbq_s32(int16x8_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovnbq(a, b);
#else /* POLYMORPHIC */
return vqmovnbq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqmovnbq_u16(uint8x16_t a, uint16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovnbq(a, b);
#else /* POLYMORPHIC */
return vqmovnbq_u16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqmovnbq_u32(uint16x8_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovnbq(a, b);
#else /* POLYMORPHIC */
return vqmovnbq_u32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vqmovntq_s16(int8x16_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovntq(a, b);
#else /* POLYMORPHIC */
return vqmovntq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vqmovntq_s32(int16x8_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovntq(a, b);
#else /* POLYMORPHIC */
return vqmovntq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqmovntq_u16(uint8x16_t a, uint16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovntq(a, b);
#else /* POLYMORPHIC */
return vqmovntq_u16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqmovntq_u32(uint16x8_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovntq(a, b);
#else /* POLYMORPHIC */
return vqmovntq_u32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovunbq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqmovunbq_s16(uint8x16_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovunbq(a, b);
#else /* POLYMORPHIC */
return vqmovunbq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovunbq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqmovunbq_s32(uint16x8_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovunbq(a, b);
#else /* POLYMORPHIC */
return vqmovunbq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovuntq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqmovuntq_s16(uint8x16_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
return vqmovuntq(a, b);
#else /* POLYMORPHIC */
return vqmovuntq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovuntq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqmovuntq_s32(uint16x8_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
return vqmovuntq(a, b);
#else /* POLYMORPHIC */
return vqmovuntq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqmovnbq_m_s16(int8x16_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovnbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovnbq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqmovnbq_m_s32(int16x8_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovnbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovnbq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_m_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqmovnbq_m_u16(uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovnbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovnbq_m_u16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovnbq_m_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqmovnbq_m_u32(uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovnbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovnbq_m_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqmovntq_m_s16(int8x16_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovntq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqmovntq_m_s32(int16x8_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovntq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_m_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqmovntq_m_u16(uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovntq_m_u16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovntq_m_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqmovntq_m_u32(uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovntq_m_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovunbq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqmovunbq_m_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovunbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovunbq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovunbq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqmovunbq_m_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovunbq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovunbq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovuntq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqmovuntq_m_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovuntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovuntq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqmovuntq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqmovuntq_m_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vqmovuntq_m(a, b, p);
#else /* POLYMORPHIC */
return vqmovuntq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

41 changes: 29 additions & 12 deletions llvm/include/llvm/IR/IntrinsicsARM.td
@@ -911,8 +911,22 @@ multiclass MVEPredicatedM<list<LLVMType> rets, list<LLVMType> params,
LLVMMatchType<0>, rets[0])], props>;
}

// Intrinsic with a predicated and a non-predicated case. The predicated case
// has two additional parameters: inactive (the value for inactive lanes, can
// be undef) and predicate.
multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
list<LLVMType> params, LLVMType inactive,
LLVMType predicate,
list<IntrinsicProperty> props = [IntrNoMem]> {
def "": Intrinsic<rets, flags # params, props>;
def _predicated: Intrinsic<rets, flags # [inactive] # params # [predicate],
props>;
}
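
For orientation (an illustrative note, not part of the diff): from the IR side, the two records this multiclass generates become an unpredicated and a predicated intrinsic, with the predicated form taking the inactive value first and the predicate last. For the int_arm_mve_vcvt_widen instantiation just below, the resulting declarations match the ones used by the vcvt.ll test later in this patch:

declare <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half>, i32)
declare <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float>, <8 x half>, i32, <4 x i1>)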

defm int_arm_mve_vcvt_narrow: MVEPredicated<[llvm_v8f16_ty],
[llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty], llvm_v4i1_ty>;
defm int_arm_mve_vcvt_widen: MVEMXPredicated<[llvm_v4f32_ty], [],
[llvm_v8f16_ty, llvm_i32_ty], llvm_v4f32_ty, llvm_v4i1_ty>;

defm int_arm_mve_vldr_gather_base: MVEPredicated<
[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
@@ -1044,18 +1058,6 @@ def int_arm_mve_vmull_poly: Intrinsic<
[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem]>;

// Intrinsic with a predicated and a non-predicated case. The predicated case
// has two additional parameters: inactive (the value for inactive lanes, can
// be undef) and predicate.
multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
list<LLVMType> params, LLVMType inactive,
LLVMType predicate,
list<IntrinsicProperty> props = [IntrNoMem]> {
def "": Intrinsic<rets, flags # params, props>;
def _predicated: Intrinsic<rets, flags # [inactive] # params # [predicate],
props>;
}

// The first two parameters are compile-time constants:
// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
// instruction. Note: the flag is inverted to match the corresponding
@@ -1164,6 +1166,12 @@ def int_arm_mve_vcvt_fp_int_predicated: Intrinsic<
llvm_anyvector_ty /* predicate */, LLVMMatchType<0> /* inactive */],
[IntrNoMem]>;

foreach suffix = ["a","n","p","m"] in {
defm "int_arm_mve_vcvt"#suffix: MVEMXPredicated<
[llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
[llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
}

def int_arm_mve_vrintn: Intrinsic<
[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vcls: Intrinsic<
@@ -1211,4 +1219,13 @@ def int_arm_mve_vmovn_predicated: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty /* top half */,
llvm_anyvector_ty /* predicate */], [IntrNoMem]>;

def int_arm_mve_vqmovn: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
llvm_i32_ty /* top half */], [IntrNoMem]>;
def int_arm_mve_vqmovn_predicated: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
llvm_i32_ty /* top half */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;

} // end TargetPrefix
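
An illustrative note (not part of the patch): the three i32 flag operands of int_arm_mve_vqmovn select the instruction variant, namely unsigned output, unsigned input, and bottom/top half respectively. A minimal IR sketch, mirroring calls that appear in the vqmovn.ll test below:

  %nb = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 0) ; vqmovnb.s16
  %nt = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 1) ; vqmovnt.s16
  %ub = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 0) ; vqmovnb.u16
  %ut = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 1) ; vqmovunt.s16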
93 changes: 78 additions & 15 deletions llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3659,21 +3659,42 @@ class MVE_VCVT_fp_int_anpm<string suffix, bits<2> size, bit op, string anpm,
let validForTailPredication = 1;
}

multiclass MVE_VCVT_fp_int_anpm_multi<string suffix, bits<2> size, bit op,
list<dag> pattern=[]> {
def a : MVE_VCVT_fp_int_anpm<suffix, size, op, "a", 0b00>;
def n : MVE_VCVT_fp_int_anpm<suffix, size, op, "n", 0b01>;
def p : MVE_VCVT_fp_int_anpm<suffix, size, op, "p", 0b10>;
def m : MVE_VCVT_fp_int_anpm<suffix, size, op, "m", 0b11>;
multiclass MVE_VCVT_fp_int_anpm_inner<MVEVectorVTInfo Int, MVEVectorVTInfo Flt,
string anpm, bits<2> rm> {
def "": MVE_VCVT_fp_int_anpm<Int.Suffix # "." # Flt.Suffix, Int.Size,
Int.Unsigned, anpm, rm>;

defvar Inst = !cast<Instruction>(NAME);
defvar IntrBaseName = "int_arm_mve_vcvt" # anpm;
defvar UnpredIntr = !cast<Intrinsic>(IntrBaseName);
defvar PredIntr = !cast<Intrinsic>(IntrBaseName # "_predicated");

let Predicates = [HasMVEFloat] in {
def : Pat<(Int.Vec (UnpredIntr (i32 Int.Unsigned), (Flt.Vec MQPR:$in))),
(Int.Vec (Inst (Flt.Vec MQPR:$in)))>;

def : Pat<(Int.Vec (PredIntr (i32 Int.Unsigned), (Int.Vec MQPR:$inactive),
(Flt.Vec MQPR:$in), (Flt.Pred VCCR:$pred))),
(Int.Vec (Inst (Flt.Vec MQPR:$in), ARMVCCThen,
(Flt.Pred VCCR:$pred), (Int.Vec MQPR:$inactive)))>;
}
}

multiclass MVE_VCVT_fp_int_anpm_outer<MVEVectorVTInfo Int,
MVEVectorVTInfo Flt> {
defm a : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "a", 0b00>;
defm n : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "n", 0b01>;
defm p : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "p", 0b10>;
defm m : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "m", 0b11>;
}

// This defines instructions such as MVE_VCVTu16f16a, with an explicit
// rounding-mode suffix on the mnemonic. The class below will define
// the bare MVE_VCVTu16f16 (with implied rounding toward zero).
defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_multi<"s16.f16", 0b01, 0b0>;
defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_multi<"u16.f16", 0b01, 0b1>;
defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_multi<"s32.f32", 0b10, 0b0>;
defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_multi<"u32.f32", 0b10, 0b1>;
defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8s16, MVE_v8f16>;
defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8u16, MVE_v8f16>;
defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4s32, MVE_v4f32>;
defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4u32, MVE_v4f32>;
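
For illustration (a hedged sketch, not part of the diff; the exact overload suffixes are assumptions following the usual naming convention, since the vcvt_anpm tests are not rendered above): the foreach in IntrinsicsARM.td creates int_arm_mve_vcvta/vcvtn/vcvtp/vcvtm plus _predicated variants, and the patterns in MVE_VCVT_fp_int_anpm_inner select these records from IR along the lines of:

  %s = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> %a) ; vcvta.s32.f32
  %u = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> %b)  ; vcvtm.u16.f16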

class MVE_VCVT_fp_int<string suffix, bits<2> size, bit toint, bit unsigned,
list<dag> pattern=[]>
@@ -4452,11 +4473,41 @@ defm : MVE_VMOVN_p<MVE_VMOVNi32th, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VMOVN_p<MVE_VMOVNi16bh, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VMOVN_p<MVE_VMOVNi16th, 1, MVE_v16i8, MVE_v8i16>;

multiclass MVE_VQMOVN_p<Instruction Inst, bit outU, bit inU, bit top,
MVEVectorVTInfo VTI, MVEVectorVTInfo InVTI> {
def : Pat<(VTI.Vec (int_arm_mve_vqmovn (VTI.Vec MQPR:$Qd_src),
(InVTI.Vec MQPR:$Qm),
(i32 outU), (i32 inU), (i32 top))),
(VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
(InVTI.Vec MQPR:$Qm)))>;

def : Pat<(VTI.Vec (int_arm_mve_vqmovn_predicated (VTI.Vec MQPR:$Qd_src),
(InVTI.Vec MQPR:$Qm),
(i32 outU), (i32 inU), (i32 top),
(InVTI.Pred VCCR:$pred))),
(VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
(InVTI.Vec MQPR:$Qm),
ARMVCCThen, (InVTI.Pred VCCR:$pred)))>;
}

defm : MVE_VQMOVN_p<MVE_VQMOVNs32bh, 0, 0, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs32th, 0, 0, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs16bh, 0, 0, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVNs16th, 0, 0, 1, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu32bh, 1, 1, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu32th, 1, 1, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu16bh, 1, 1, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVNu16th, 1, 1, 1, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs32bh, 1, 0, 0, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs32th, 1, 0, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs16bh, 1, 0, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VQMOVN_p<MVE_VQMOVUNs16th, 1, 0, 1, MVE_v16i8, MVE_v8i16>;

class MVE_VCVT_ff<string iname, string suffix, bit op, bit T,
list<dag> pattern=[]>
: MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qm),
"$Qd, $Qm", vpred_n, "$Qd = $Qd_src", pattern> {
dag iops_extra, vpred_ops vpred, string cstr>
: MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
!con(iops_extra, (ins MQPR:$Qm)), "$Qd, $Qm",
vpred, cstr, []> {
let Inst{28} = op;
let Inst{21-16} = 0b111111;
let Inst{12} = T;
@@ -4467,7 +4518,8 @@ class MVE_VCVT_ff<string iname, string suffix, bit op, bit T,
}

multiclass MVE_VCVT_f2h_m<string iname, int half> {
def "": MVE_VCVT_ff<iname, "f16.f32", 0b0, half>;
def "": MVE_VCVT_ff<iname, "f16.f32", 0b0, half,
(ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">;
defvar Inst = !cast<Instruction>(NAME);

let Predicates = [HasMVEFloat] in {
@@ -4483,7 +4535,18 @@ multiclass MVE_VCVT_f2h_m<string iname, int half> {
}

multiclass MVE_VCVT_h2f_m<string iname, int half> {
def "": MVE_VCVT_ff<iname, "f32.f16", 0b1, half>;
def "": MVE_VCVT_ff<iname, "f32.f16", 0b1, half, (ins), vpred_r, "">;
defvar Inst = !cast<Instruction>(NAME);

let Predicates = [HasMVEFloat] in {
def : Pat<(v4f32 (int_arm_mve_vcvt_widen (v8f16 MQPR:$Qm), (i32 half))),
(v4f32 (Inst (v8f16 MQPR:$Qm)))>;
def : Pat<(v4f32 (int_arm_mve_vcvt_widen_predicated
(v4f32 MQPR:$inactive), (v8f16 MQPR:$Qm), (i32 half),
(v4i1 VCCR:$mask))),
(v4f32 (Inst (v8f16 MQPR:$Qm), ARMVCCThen,
(v4i1 VCCR:$mask), (v4f32 MQPR:$inactive)))>;
}
}

defm MVE_VCVTf16f32bh : MVE_VCVT_f2h_m<"vcvtb", 0b0>;
50 changes: 50 additions & 0 deletions llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt.ll
@@ -6,6 +6,8 @@ declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)

declare <8 x half> @llvm.arm.mve.vcvt.narrow(<8 x half>, <4 x float>, i32)
declare <8 x half> @llvm.arm.mve.vcvt.narrow.predicated(<8 x half>, <4 x float>, i32, <4 x i1>)
declare <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half>, i32)
declare <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float>, <8 x half>, i32, <4 x i1>)

declare <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32, <8 x i16>, i32)
declare <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32, <4 x i32>, i32)
@@ -367,3 +369,51 @@ entry:
%2 = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, i32 32, <4 x i1> %1)
ret <4 x i32> %2
}

define arm_aapcs_vfpcc <4 x float> @test_vcvtbq_f32_f16(<8 x half> %a) {
; CHECK-LABEL: test_vcvtbq_f32_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcvtb.f32.f16 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = tail call <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half> %a, i32 0)
ret <4 x float> %0
}

define arm_aapcs_vfpcc <4 x float> @test_vcvttq_f32_f16(<8 x half> %a) {
; CHECK-LABEL: test_vcvttq_f32_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcvtt.f32.f16 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = tail call <4 x float> @llvm.arm.mve.vcvt.widen(<8 x half> %a, i32 1)
ret <4 x float> %0
}

define arm_aapcs_vfpcc <4 x float> @test_vcvtbq_m_f32_f16(<4 x float> %inactive, <8 x half> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtbq_m_f32_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vcvtbt.f32.f16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> %inactive, <8 x half> %a, i32 0, <4 x i1> %1)
ret <4 x float> %2
}

define arm_aapcs_vfpcc <4 x float> @test_vcvttq_m_f32_f16(<4 x float> %inactive, <8 x half> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvttq_m_f32_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vcvttt.f32.f16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <4 x float> @llvm.arm.mve.vcvt.widen.predicated(<4 x float> %inactive, <8 x half> %a, i32 1, <4 x i1> %1)
ret <4 x float> %2
}
631 changes: 631 additions & 0 deletions llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll

Large diffs are not rendered by default.

299 changes: 299 additions & 0 deletions llvm/test/CodeGen/Thumb2/mve-intrinsics/vqmovn.ll
@@ -0,0 +1,299 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_s16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovnbq_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnb.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 0)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_s32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovnbq_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnb.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 0)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_u16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovnbq_u16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnb.u16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 0)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_u32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovnbq_u32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnb.u32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 0)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_s16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovntq_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 1)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_s32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovntq_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 1)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_u16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovntq_u16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnt.u16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 1)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_u32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovntq_u32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovnt.u32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 1)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovunbq_s16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovunbq_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovunb.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 0)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovunbq_s32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovunbq_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovunb.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 0)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovuntq_s16(<16 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqmovuntq_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovunt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 1)
ret <16 x i8> %0
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovuntq_s32(<8 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqmovuntq_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqmovunt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 1)
ret <8 x i16> %0
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovnbq_m_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovnbt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 0, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovnbq_m_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovnbt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 0, <4 x i1> %1)
ret <8 x i16> %2
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_m_u16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovnbq_m_u16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovnbt.u16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 0, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_m_u32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovnbq_m_u32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovnbt.u32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 0, <4 x i1> %1)
ret <8 x i16> %2
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovntq_m_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovntt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 1, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovntq_m_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovntt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 1, <4 x i1> %1)
ret <8 x i16> %2
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_m_u16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovntq_m_u16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovntt.u16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 1, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_m_u32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovntq_m_u32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovntt.u32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 1, <4 x i1> %1)
ret <8 x i16> %2
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovunbq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovunbq_m_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovunbt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 0, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovunbq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovunbq_m_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovunbt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 0, <4 x i1> %1)
ret <8 x i16> %2
}

define arm_aapcs_vfpcc <16 x i8> @test_vqmovuntq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovuntq_m_s16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovuntt.s16 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
%2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 1, <8 x i1> %1)
ret <16 x i8> %2
}

define arm_aapcs_vfpcc <8 x i16> @test_vqmovuntq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
; CHECK-LABEL: test_vqmovuntq_m_s32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmsr p0, r0
; CHECK-NEXT: vpst
; CHECK-NEXT: vqmovuntt.s32 q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext i16 %p to i32
%1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
%2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 1, <4 x i1> %1)
ret <8 x i16> %2
}

declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)

declare <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32)
declare <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32)

declare <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8>, <8 x i16>, i32, i32, i32, <8 x i1>)
declare <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16>, <4 x i32>, i32, i32, i32, <4 x i1>)