[RISCV] Add the passthru operand for vmv.vv/vmv.vx/vfmv.vf IR intrinsics.

Also add the passthru operand to VMV_V_X_VL, VFMV_V_F_VL and
SPLAT_VECTOR_SPLIT_I64_VL.

The goal is to support the tail and mask policies in the RVV builtins.
We focus on the IR part first. If the passthru operand is undef, we use
tail agnostic; otherwise we use tail undisturbed.

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D119688
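For illustration only (not part of the patch): a minimal LLVM IR sketch of the new intrinsic form, reusing the vfmv.v.f signature that appears in the updated tests below. The first operand is the passthru; the function names splat_ta and splat_tu are made up for this sketch.

define <vscale x 2 x float> @splat_ta(float %x, i64 %vl) {
  ; undef passthru -> tail agnostic
  %v = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> undef, float %x, i64 %vl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @splat_tu(<vscale x 2 x float> %merge, float %x, i64 %vl) {
  ; non-undef passthru -> tail undisturbed; elements past vl keep the values of %merge
  %v = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> %merge, float %x, i64 %vl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float>, float, i64)

With this patch the unmasked builtins always pass undef, so their codegen is unchanged; the non-undef form is what the planned tail-policy variants of the builtins are expected to use.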
zakk0610 committed Feb 17, 2022
1 parent be77afe commit eeb7754
Showing 21 changed files with 848 additions and 309 deletions.
5 changes: 3 additions & 2 deletions clang/include/clang/Basic/riscv_vector.td
@@ -1755,7 +1755,7 @@ let HasMask = false, HasPolicy = false,
}

// 12.16. Vector Integer Move Instructions
-let HasMask = false, HasPolicy = false in {
+let HasMask = false, HasNoMaskPassThru = true, HasPolicy = false in {
let MangledName = "vmv_v" in {
defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
[["v", "Uv", "UvUv"]]>;
@@ -1890,7 +1890,8 @@ let HasMask = false, HasPolicy = false,
}

// 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
+let HasMask = false, HasNoMaskPassThru = true, HasNoMaskedOverloaded = false,
+    HasPolicy = false in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
[["f", "v", "ve"]]>;

106 changes: 53 additions & 53 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c

Large diffs are not rendered by default.
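Since the vmv.c diff is not rendered here, the following is a hedged sketch, by analogy with the vfmv.v.f checks below, of what an unmasked integer-move builtin is expected to emit once HasNoMaskPassThru is set: the frontend prepends an undef passthru operand to the intrinsic call. The nxv2i32 (vint32m1_t) instantiation and the function name are assumptions for this sketch, not taken from the patch.

; assumed lowering of vmv_v_x_i32m1(x, vl) after this change
define <vscale x 2 x i32> @vmv_v_x_i32m1_sketch(i32 %x, i64 %vl) {
  %v = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32> undef, i32 %x, i64 %vl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(<vscale x 2 x i32>, i32, i64)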

30 changes: 15 additions & 15 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(<vscale x 4 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(<vscale x 8 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(<vscale x 16 x float> undef, float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(<vscale x 1 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(<vscale x 2 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(<vscale x 4 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(<vscale x 8 x double> undef, double [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) {
@@ -251,7 +251,7 @@ vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(<vscale x 1 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
@@ -260,7 +260,7 @@ vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(<vscale x 2 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
@@ -269,7 +269,7 @@ vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(<vscale x 4 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
@@ -278,7 +278,7 @@ vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(<vscale x 8 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
@@ -287,7 +287,7 @@ vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(<vscale x 16 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
@@ -296,7 +296,7 @@ vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {

// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(<vscale x 32 x half> undef, half [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) {
