[RISCV] add Half-precision test for vle/vse
Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D109681
Shao-Ce Sun committed Sep 14, 2021
1 parent 7c76cef commit d4f25d0
Showing 2 changed files with 120 additions and 0 deletions.
clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c (60 additions, 0 deletions)
@@ -1195,3 +1195,63 @@ vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) {
vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) {
return vle16_v_f16m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16mf4_m (mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16mf2_m (mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16m1_m (mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16m2_m (mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16m4_m (mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
return vle16_v_f16m8_m (mask, maskedoff, base, vl);
}
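
A minimal usage sketch (not part of this commit) of the masked half-precision load intrinsic exercised above. It assumes the RVV intrinsics header riscv_vector.h and the same pre-v1.0 intrinsic naming the tests use; the vsetvl_e16m1 call and the build target (rv64 with the vector and Zfh extensions) are assumptions, since the RUN lines are not shown in this hunk.

#include <riscv_vector.h>

// Load up to n half-precision elements from src, but only in lanes where
// mask is set; inactive lanes keep the values already held in fallback.
vfloat16m1_t masked_load_f16(vbool16_t mask, vfloat16m1_t fallback,
                             const _Float16 *src, size_t n) {
  size_t vl = vsetvl_e16m1(n); // assumed vsetvl intrinsic of this API version
  return vle16_v_f16m1_m(mask, fallback, src, vl);
}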
clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c (60 additions, 0 deletions)
@@ -1195,3 +1195,63 @@ void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
return vse16_v_f16m8(base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
return vse16_v_f16mf4_m (mask, base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
return vse16_v_f16mf2_m (mask, base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
return vse16_v_f16m1_m (mask, base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
return vse16_v_f16m2_m (mask, base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
return vse16_v_f16m4_m (mask, base, value, vl);
}

// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m8_m (vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
return vse16_v_f16m8_m (mask, base, value, vl);
}
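
A companion sketch (also not part of the commit) pairing an unmasked load with the masked store tested above to implement a conditional copy: only lanes where mask is set are written to dst, and the rest of dst is left untouched. The unmasked vle16_v_f16m1 and vsetvl_e16m1 names follow the same naming scheme as the intrinsics in this diff but are assumptions here.

#include <riscv_vector.h>

// Copy one vector chunk of up to n half-precision elements from src to dst,
// writing only the lanes selected by mask.
void masked_copy_chunk_f16(vbool16_t mask, _Float16 *dst,
                           const _Float16 *src, size_t n) {
  size_t vl = vsetvl_e16m1(n);             // assumed vsetvl intrinsic
  vfloat16m1_t v = vle16_v_f16m1(src, vl); // unmasked load of the source chunk
  vse16_v_f16m1_m(mask, dst, v, vl);       // masked store: only active lanes hit memory
}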
