diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
index 127c3709151a0..688e882021068 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 
@@ -37,11 +37,69 @@ define <8 x i8> @trn2.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
 }
 
 define <16 x i8> @trn1.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
+; RV32-LABEL: trn1.v16i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vid.v v11
+; RV32-NEXT:    vrgather.vv v10, v8, v11
+; RV32-NEXT:    lui a0, 11
+; RV32-NEXT:    addi a0, a0, -1366
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a0
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT:    vadd.vi v8, v11, -1
+; RV32-NEXT:    vrgather.vv v10, v9, v8, v0.t
+; RV32-NEXT:    vmv.v.v v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: trn1.v16i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vid.v v11
+; RV64-NEXT:    vrgather.vv v10, v8, v11
+; RV64-NEXT:    lui a0, 11
+; RV64-NEXT:    addiw a0, a0, -1366
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vmv.s.x v0, a0
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT:    vadd.vi v8, v11, -1
+; RV64-NEXT:    vrgather.vv v10, v9, v8, v0.t
+; RV64-NEXT:    vmv.v.v v8, v10
+; RV64-NEXT:    ret
   %tmp0 = shufflevector <16 x i8> %v0, <16 x i8> %v1, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
   ret <16 x i8> %tmp0
 }
 
 define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
+; RV32-LABEL: trn2.v16i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vid.v v11
+; RV32-NEXT:    vadd.vi v12, v11, 1
+; RV32-NEXT:    vrgather.vv v10, v8, v12
+; RV32-NEXT:    lui a0, 11
+; RV32-NEXT:    addi a0, a0, -1366
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a0
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT:    vrgather.vv v10, v9, v11, v0.t
+; RV32-NEXT:    vmv.v.v v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: trn2.v16i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vid.v v11
+; RV64-NEXT:    vadd.vi v12, v11, 1
+; RV64-NEXT:    vrgather.vv v10, v8, v12
+; RV64-NEXT:    lui a0, 11
+; RV64-NEXT:    addiw a0, a0, -1366
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vmv.s.x v0, a0
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT:    vrgather.vv v10, v9, v11, v0.t
+; RV64-NEXT:    vmv.v.v v8, v10
+; RV64-NEXT:    ret
   %tmp0 = shufflevector <16 x i8> %v0, <16 x i8> %v1, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
   ret <16 x i8> %tmp0
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
index bbe900dc5d229..f6bf618ca665c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half>, metadata)
 define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) {
@@ -32,12 +32,38 @@ define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) {
 
 declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, metadata)
 define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) {
+; RV32-LABEL: vfptosi_v1f16_v1i7:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.w.h a0, fa5, rtz
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vfptosi_v1f16_v1i7:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.l.h a0, fa5, rtz
+; RV64-NEXT:    ret
   %evec = call <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
   ret <1 x i7> %evec
 }
 
 declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, metadata)
 define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) {
+; RV32-LABEL: vfptoui_v1f16_v1i7:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.wu.h a0, fa5, rtz
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vfptoui_v1f16_v1i7:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.lu.h a0, fa5, rtz
+; RV64-NEXT:    ret
   %evec = call <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
   ret <1 x i7> %evec
 }