@@ -0,0 +1,218 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +experimental-zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
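
// These tests cover the non-overloaded, non-policy narrowing conversions from
// f32 to bf16. The intrinsic naming is
// __riscv_vfncvtbf16_f_f_w_bf16<LMUL>[_rm][_m]: plain forms defer rounding to
// the dynamic frm register (encoded as i64 7 on the IR intrinsic), _rm forms
// take an explicit rounding mode (__RISCV_FRM_RNE below, encoded as i64 0),
// and _m forms add a mask plus the default tail-agnostic/mask-agnostic policy
// (the trailing i64 3).
//
// A minimal strip-mined usage sketch, assuming the zvfbfmin vle32/vse16
// load/store intrinsics are available (hypothetical helper, not part of the
// autogenerated checks):
//
//   void narrow(const float *src, __bf16 *dst, size_t n) {
//     for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
//       vl = __riscv_vsetvl_e32mf2(n);                             // elements this pass
//       vfloat32mf2_t v = __riscv_vle32_v_f32mf2(src, vl);         // load f32
//       vbfloat16mf4_t w = __riscv_vfncvtbf16_f_f_w_bf16mf4(v, vl); // narrow, dynamic frm
//       __riscv_vse16_v_bf16mf4(w, dst, vl);                       // store bf16
//     }
//   }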

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4(
// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2(
// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1(
// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2(
// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4(
// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf4_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf2_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m1_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m2_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m4_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_rm(
// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_rm(
// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_rm(
// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_rm(
// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_rm(
// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_rm_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t
test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_rm_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm,
vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_rm_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_rm_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_rm_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -0,0 +1,113 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +experimental-zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
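
// These tests cover the widening conversions from bf16 to f32. Widening a
// bf16 value to f32 is exact, so there are no _rm variants and the IR
// intrinsic carries no rounding-mode operand; _m forms append the mask and
// the default tail-agnostic/mask-agnostic policy (the trailing i64 3). A
// hedged single-call sketch using the names tested below:
//
//   vfloat32mf2_t widen(vbfloat16mf4_t v, size_t vl) {
//     return __riscv_vfwcvtbf16_f_f_v_f32mf2(v, vl);
//   }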

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2(
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1(
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2(
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4(
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8(
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4_m(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8_m(vm, vs2, vl);
}

Large diffs are not rendered by default.

@@ -0,0 +1,218 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +experimental-zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
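
// Same coverage as the non-overloaded vfncvtbf16 tests, but through the
// type-overloaded API: every call is spelled __riscv_vfncvtbf16_f, and the
// concrete variant is picked from the argument types: the vector source
// selects the LMUL, a leading vbool argument selects the masked form, and a
// rounding-mode argument between the source and vl selects the _rm form. The
// expected IR is identical to the non-overloaded file.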

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4(
// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2(
// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1(
// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2(
// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4(
// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_rm(
// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_rm(
// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_rm(
// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_rm(
// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_rm(
// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf4_rm_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t
test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvtbf16_f_f_w_bf16mf2_rm_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm,
vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvtbf16_f_f_w_bf16m1_rm_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvtbf16_f_f_w_bf16m2_rm_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvtbf16_f_f_w_bf16m4_rm_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -0,0 +1,113 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +experimental-zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
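
// Overloaded counterpart of the vfwcvtbf16 tests: every call is spelled
// __riscv_vfwcvtbf16_f, with a leading vbool argument alone selecting the
// masked form. The expected IR matches the non-overloaded file.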

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2(
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1(
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2(
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4(
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8(
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f(vm, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfwcvtbf16_f(vm, vs2, vl);
}

Large diffs are not rendered by default.

2,491 changes: 730 additions & 1,761 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c

Large diffs are not rendered by default.

2,491 changes: 730 additions & 1,761 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c

Large diffs are not rendered by default.

@@ -0,0 +1,208 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +experimental-zvfbfmin -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
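
// Policy variants of the widening conversion. Each suffix maps onto the
// trailing policy operand of the masked IR intrinsic: _tum is
// tail-undisturbed/mask-agnostic (i64 2) and _tumu is
// tail-undisturbed/mask-undisturbed (i64 0). _tu is unmasked, so there is no
// policy operand; passing vd rather than poison as the merge operand is what
// keeps the tail undisturbed. A hedged sketch using a name tested below:
// result elements past vl keep their old values from vd.
//
//   vfloat32mf2_t widen_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs2, size_t vl) {
//     return __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vd, vs2, vl);
//   }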

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_tu(
// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_tu(
// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_tu(
// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_tu(
// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_tu(
// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8_tum(vm, vd, vs2, vl);
}
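
// Note on the _tumu group below: tail-undisturbed, mask-undisturbed semantics
// set neither agnostic bit, so the trailing policy immediate is `i64 0`.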

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8_tumu(vm, vd, vs2, vl);
}
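
// Note on the _mu group below: tail-agnostic, mask-undisturbed semantics set
// only the tail-agnostic bit (bit 0), giving the policy immediate `i64 1`.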

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvtbf16_f_f_v_f32mf2_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32mf2_mu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvtbf16_f_f_v_f32m1_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m1_mu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvtbf16_f_f_v_f32m2_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m2_mu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvtbf16_f_f_v_f32m4_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m4_mu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvtbf16_f_f_v_f32m8_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfwcvtbf16_f_f_v_f32m8_mu(vm, vd, vs2, vl);
}
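
// A minimal usage sketch, not part of the autogenerated assertions above:
// strip-mined widening of a bf16 buffer to f32 via the unmasked intrinsic.
// `widen_bf16_to_f32` and its parameters are illustrative, and it assumes the
// standard Zvfbfmin bf16 load and f32 store intrinsics are available.
static void widen_bf16_to_f32(const __bf16 *src, float *dst, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e16m4(n);                       // elements in this strip
    vbfloat16m4_t v = __riscv_vle16_v_bf16m4(src, vl);  // load bf16 source
    // Widen bf16m4 -> f32m8 and store the converted strip.
    __riscv_vse32_v_f32m8(dst, __riscv_vfwcvtbf16_f_f_v_f32m8(v, vl), vl);
  }
}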
