@@ -0,0 +1,188 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | \
// RUN: opt -S -passes=mem2reg | FileCheck %s

#include <sifive_vector.h>

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}
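For reference, a minimal sketch (not part of the patch) of how the tail-undisturbed form tested above might be called from application C code; the helper name is illustrative, and the rounding-mode immediate 2 mirrors the tests:

#include <sifive_vector.h>

// Hypothetical helper: narrow f32 elements to i8 with sf.vfnrclip.x.f.qf,
// keeping tail elements past vl from `prev` (tu policy).
static vint8mf4_t narrow_f32_to_i8_tu(vint8mf4_t prev, vfloat32m1_t src,
                                      float scale, size_t vl) {
  return __riscv_sf_vfnrclip_x_f_qf_tu(prev, src, scale, 2, vl);
}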

@@ -0,0 +1,188 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | \
// RUN: opt -S -passes=mem2reg | FileCheck %s

#include <sifive_vector.h>

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
}
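Likewise, a minimal sketch (not part of the patch) of the masked tail-undisturbed (_tum) form exercised above; note that the intrinsics in this file take no explicit rounding-mode argument, and the helper name is illustrative:

#include <sifive_vector.h>

// Hypothetical helper: only active elements are clipped; masked-off and
// tail elements are taken from `prev` (tum policy).
static vuint8mf4_t narrow_f32_to_u8_tum(vbool32_t mask, vuint8mf4_t prev,
                                        vfloat32m1_t src, float scale,
                                        size_t vl) {
  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, prev, src, scale, vl);
}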

@@ -0,0 +1,188 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | \
// RUN: opt -S -passes=mem2reg | FileCheck %s

#include <sifive_vector.h>

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
}

9 changes: 9 additions & 0 deletions clang/test/Preprocessor/riscv-target-features.c
@@ -33,6 +33,7 @@
// CHECK-NOT: __riscv_xcvsimd {{.*$}}
// CHECK-NOT: __riscv_xsfcie {{.*$}}
// CHECK-NOT: __riscv_xsfvcp {{.*$}}
// CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}}
// CHECK-NOT: __riscv_xsfvfwmaccqqq {{.*$}}
// CHECK-NOT: __riscv_xsfqmaccdod {{.*$}}
// CHECK-NOT: __riscv_xsfvqmaccqoq {{.*$}}
@@ -326,6 +327,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVCP-EXT %s
// CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}}

// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32ixsfvfnrclipxfqf -x c -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
// RUN: %clang --target=riscv64-unknown-linux-gnu \
// RUN: -march=rv64ixsfvfnrclipxfqf -x c -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
// CHECK-XSFVFNRCLIPXFQF-EXT: __riscv_xsfvfnrclipxfqf 1000000{{$}}

// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32ixsfvfwmaccqqq -x c -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFWMACCQQQ-EXT %s
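Application code would key off the __riscv_xsfvfnrclipxfqf macro checked above; a minimal sketch, where HAVE_SF_VFNRCLIP is a hypothetical project-local flag:

#if defined(__riscv_xsfvfnrclipxfqf)
#include <sifive_vector.h>  /* sf.vfnrclip intrinsics are available */
#define HAVE_SF_VFNRCLIP 1  /* hypothetical feature flag */
#else
#define HAVE_SF_VFNRCLIP 0
#endif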
8 changes: 8 additions & 0 deletions clang/test/Sema/rvv-required-features-invalid.c
@@ -27,3 +27,11 @@ void test_xsfvqmaccqoq() {
void test_xsfvfwmaccqqq() {
__riscv_sf_vfwmacc_4x4x4(); // expected-error {{call to undeclared function '__riscv_sf_vfwmacc_4x4x4'}}
}

void test_xsfvfnrclipxfqf() {
__riscv_sf_vfnrclip_x_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_x_f_qf'}}
}

void test_xsfvfnrclipxufqf() {
__riscv_sf_vfnrclip_xu_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_xu_f_qf'}}
}
11 changes: 10 additions & 1 deletion clang/test/Sema/rvv-required-features.c
@@ -1,7 +1,8 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp \
// RUN: -target-feature +xsfvqmaccdod -target-feature +xsfvqmaccqoq \
// RUN: -target-feature +zvfh -target-feature +xsfvfwmaccqqq %s -fsyntax-only -verify
// RUN: -target-feature +zvfh -target-feature +xsfvfwmaccqqq \
// RUN: -target-feature +xsfvfnrclipxfqf %s -fsyntax-only -verify

// expected-no-diagnostics

@@ -31,3 +32,11 @@ void test_xsfvqmaccqoq(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
void test_xsfvfwmaccqqq(vfloat32m1_t vd, vfloat16m1_t vs1, vfloat16mf2_t vs2, size_t vl) {
__riscv_sf_vfwmacc_4x4x4(vd, vs1, vs2, vl);
}

void test_xsfvfnrclipxufqf(vfloat32m1_t vs1, float rs2, size_t vl) {
__riscv_sf_vfnrclip_xu_f_qf(vs1, rs2, vl);
}

void test_xsfvfnrclipxfqf(vfloat32m1_t vs1, float rs2, size_t vl) {
__riscv_sf_vfnrclip_x_f_qf(vs1, rs2, vl);
}
1 change: 1 addition & 0 deletions clang/utils/TableGen/RISCVVEmitter.cpp
@@ -656,6 +656,7 @@ void RVVEmitter::createRVVIntrinsics(
.Case("RV64", RVV_REQ_RV64)
.Case("ZvfhminOrZvfh", RVV_REQ_ZvfhminOrZvfh)
.Case("Xsfvcp", RVV_REQ_Xsfvcp)
.Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
.Case("Xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq)
.Case("Xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod)
.Case("Xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq)
28 changes: 28 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -136,6 +136,30 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}

// Input: (passthru, vector_in, scalar_in, frm, vl)
class RISCVSFCustomVFNRCLIPUnMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
llvm_anyint_ty, LLVMMatchType<2>],
[ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}

// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
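// The trailing policy immediate matches the clang tests earlier in the
// patch: 2 = tum, 0 = tumu, 1 = mu (the unmasked intrinsic has no policy).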
class RISCVSFCustomVFNRCLIPMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>, LLVMMatchType<2>],
[ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 5;
}

multiclass RISCVSFCustomVFNRCLIP {
def NAME : RISCVSFCustomVFNRCLIPUnMasked;
def NAME # "_mask" : RISCVSFCustomVFNRCLIPMasked;
}

defm "" : RISCVSFCustomVC_X<["x", "i"]>;
defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
@@ -155,4 +179,8 @@

// XSfvfwmaccqqq
def int_riscv_sf_vfwmacc_4x4x4 : RISCVSFCustomVMACC;

// XSfvfnrclipxfqf
defm int_riscv_sf_vfnrclip_x_f_qf : RISCVSFCustomVFNRCLIP;
defm int_riscv_sf_vfnrclip_xu_f_qf : RISCVSFCustomVFNRCLIP;
} // TargetPrefix = "riscv"
3 changes: 3 additions & 0 deletions llvm/lib/Support/RISCVISAInfo.cpp
@@ -73,6 +73,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
{"xcvsimd", RISCVExtensionVersion{1, 0}},
{"xsfcie", RISCVExtensionVersion{1, 0}},
{"xsfvcp", RISCVExtensionVersion{1, 0}},
{"xsfvfnrclipxfqf", RISCVExtensionVersion{1, 0}},
{"xsfvfwmaccqqq", RISCVExtensionVersion{1, 0}},
{"xsfvqmaccdod", RISCVExtensionVersion{1, 0}},
{"xsfvqmaccqoq", RISCVExtensionVersion{1, 0}},
@@ -994,6 +995,7 @@ static const char *ImpliedExtsF[] = {"zicsr"};
static const char *ImpliedExtsV[] = {"zvl128b", "zve64d"};
static const char *ImpliedExtsXTHeadVdot[] = {"v"};
static const char *ImpliedExtsXsfvcp[] = {"zve32x"};
static const char *ImpliedExtsXsfvfnrclipxfqf[] = {"zve32f"};
static const char *ImpliedExtsXsfvfwmaccqqq[] = {"zve32f"};
static const char *ImpliedExtsXsfvqmaccdod[] = {"zve32x"};
static const char *ImpliedExtsXsfvqmaccqoq[] = {"zve32x"};
@@ -1064,6 +1066,7 @@ static constexpr ImpliedExtsEntry ImpliedExts[] = {
{{"f"}, {ImpliedExtsF}},
{{"v"}, {ImpliedExtsV}},
{{"xsfvcp"}, {ImpliedExtsXsfvcp}},
{{"xsfvfnrclipxfqf"}, {ImpliedExtsXsfvfnrclipxfqf}},
{{"xsfvfwmaccqqq"}, {ImpliedExtsXsfvfwmaccqqq}},
{{"xsfvqmaccdod"}, {ImpliedExtsXsfvqmaccdod}},
{{"xsfvqmaccqoq"}, {ImpliedExtsXsfvqmaccqoq}},
3 changes: 3 additions & 0 deletions llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -553,6 +553,9 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
TRY_TO_DECODE_FEATURE(
RISCV::FeatureVendorXSfvfwmaccqqq, DecoderTableXSfvfwmaccqqq32,
"SiFive Matrix Multiplication Instruction opcode table");
TRY_TO_DECODE_FEATURE(
RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32,
"SiFive FP32-to-int8 Ranged Clip Instructions opcode table");
TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfcie, DecoderTableXSfcie32,
"Sifive CIE custom opcode table");
TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXCVbitmanip,
8 changes: 8 additions & 0 deletions llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -844,6 +844,14 @@ def HasVendorXSfvfwmaccqqq : Predicate<"Subtarget->hasVendorXSfvfwmaccqqq()">,
AssemblerPredicate<(all_of FeatureVendorXSfvfwmaccqqq),
"'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction and 4-by-4))">;

def FeatureVendorXSfvfnrclipxfqf
: SubtargetFeature<"xsfvfnrclipxfqf", "HasVendorXSfvfnrclipxfqf", "true",
"'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)",
[FeatureStdExtZve32f]>;
def HasVendorXSfvfnrclipxfqf : Predicate<"Subtarget->hasVendorXSfvfnrclipxfqf()">,
AssemblerPredicate<(all_of FeatureVendorXSfvfnrclipxfqf),
"'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)">;

def FeatureVendorXCVbitmanip
: SubtargetFeature<"xcvbitmanip", "HasVendorXCVbitmanip", "true",
"'XCVbitmanip' (CORE-V Bit Manipulation)">;
49 changes: 49 additions & 0 deletions llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -176,6 +176,11 @@ class CustomSiFiveVMACC<bits<6> funct6, RISCVVFormat opv, string opcodestr>
}
}

class CustomSiFiveVFNRCLIP<bits<6> funct6, RISCVVFormat opv, string opcodestr>
: VALUVF<funct6, opv, opcodestr> {
let Inst{6-0} = OPC_CUSTOM_2.Value;
}

let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
hasSideEffects = 1, hasNoSchedulingInfo = 1, DecoderNamespace = "XSfvcp" in {
defm X : CustomSiFiveVCIX<"x", VCIX_X, uimm5, uimm5, GPR>, Sched<[]>;
@@ -212,6 +217,10 @@ let Predicates = [HasVendorXSfvfwmaccqqq], DecoderNamespace = "XSfvfwmaccqqq" in
def VFWMACC_4x4x4 : CustomSiFiveVMACC<0b111100, OPFVV, "sf.vfwmacc.4x4x4">;
}

let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvfnrclipxfqf" in {
def VFNRCLIP_XU_F_QF : CustomSiFiveVFNRCLIP<0b100010, OPFVF, "sf.vfnrclip.xu.f.qf">;
def VFNRCLIP_X_F_QF : CustomSiFiveVFNRCLIP<0b100011, OPFVF, "sf.vfnrclip.x.f.qf">;
}
class VPseudoVC_X<Operand OpClass, DAGOperand RS1Class,
bit HasSideEffect = 1> :
Pseudo<(outs),
@@ -350,6 +359,16 @@ multiclass VPseudoSiFiveVFWMACC<string Constraint = ""> {
defm NAME : VPseudoSiFiveVMACC<m.MX, m.wvrclass, m.vrclass, Constraint>;
}

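// The i8 destination EMUL is a quarter of the f32 source LMUL (SEW 32 -> 8),
// so only an M8 source needs a VRM2 destination; narrower sources fit in VR.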
multiclass VPseudoSiFiveVFNRCLIP<string Constraint = "@earlyclobber $rd"> {
foreach m = MxListVF4 in
let hasSideEffects = 0 in
defm "Pseudo" # NAME : VPseudoBinaryRoundingMode<!if(!eq(m.vrclass, VRM8),
VRM2, VR),
m.vrclass, FPR32, m,
Constraint, /*sew*/0,
UsesVXRM=0>;
}

let Predicates = [HasVendorXSfvcp] in {
foreach m = MxList in {
defm X : VPseudoVC_X<m, GPR>;
@@ -396,6 +415,11 @@ let Predicates = [HasVendorXSfvfwmaccqqq], DecoderNamespace = "XSfvfwmaccqqq" in
defm VFWMACC_4x4x4 : VPseudoSiFiveVFWMACC;
}

let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvfnrclipxfqf" in {
defm VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
defm VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
}

class VPatVC_OP4<string intrinsic_name,
string inst,
ValueType op2_type,
@@ -555,6 +579,26 @@ multiclass VPatVFWMACC<string intrinsic, string instruction, string kind>
: VPatVMACC<intrinsic, instruction, kind, AllWidenableBFloatToFloatVectors,
vbfloat16m1_t>;

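// Each i8 result type pairs with an f32 source type at four times its LMUL
// (SEW ratio 8:32), matching the type combinations in the clang tests above.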
defset list<VTypeInfoToWide> VFNRCLIPInfoPairs = {
def : VTypeInfoToWide<VI8MF8, VF32MF2>;
def : VTypeInfoToWide<VI8MF4, VF32M1>;
def : VTypeInfoToWide<VI8MF2, VF32M2>;
def : VTypeInfoToWide<VI8M1, VF32M4>;
def : VTypeInfoToWide<VI8M2, VF32M8>;
}

multiclass VPatVFNRCLIP<string intrinsic, string instruction> {
foreach pair = VFNRCLIPInfoPairs in {
defvar Vti = pair.Vti;
defvar Wti = pair.Wti;
defm : VPatBinaryRoundingMode<"int_riscv_sf_" # intrinsic,
"Pseudo" # instruction # "_" # Wti.LMul.MX,
Vti.Vector, Wti.Vector, Wti.Scalar, Vti.Mask,
Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, Wti.ScalarRegClass>;
}
}

let Predicates = [HasVendorXSfvcp] in {
foreach vti = AllVectors in {
defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
@@ -608,6 +652,11 @@ let Predicates = [HasVendorXSfvfwmaccqqq] in {
defm : VPatVFWMACC<"vfwmacc_4x4x4", "VFWMACC", "4x4x4">;
}

let Predicates = [HasVendorXSfvfnrclipxfqf] in {
defm : VPatVFNRCLIP<"vfnrclip_xu_f_qf", "VFNRCLIP_XU_F_QF">;
defm : VPatVFNRCLIP<"vfnrclip_x_f_qf", "VFNRCLIP_X_F_QF">;
}

let Predicates = [HasVendorXSfcie] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, DecoderNamespace = "XSfcie" in {
def SF_CFLUSH_D_L1 : RVInstI<0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1), "cflush.d.l1","$rs1">,
260 changes: 260 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
@@ -0,0 +1,260 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8>,
<vscale x 1 x float>,
float,
iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8> undef,
<vscale x 1 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8>,
<vscale x 2 x float>,
float,
iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8> undef,
<vscale x 2 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8>,
<vscale x 4 x float>,
float,
iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v10, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8> undef,
<vscale x 4 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v10, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8>,
<vscale x 8 x float>,
float,
iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v12, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8> undef,
<vscale x 8 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v12, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8>,
<vscale x 16 x float>,
float,
iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v16, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv2r.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8> undef,
<vscale x 16 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v16, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 16 x i8> %a
}
260 changes: 260 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
@@ -0,0 +1,260 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8>,
<vscale x 1 x float>,
float,
iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8> undef,
<vscale x 1 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
<vscale x 1 x i8> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8>,
<vscale x 2 x float>,
float,
iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8> undef,
<vscale x 2 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
<vscale x 2 x i8> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8>,
<vscale x 4 x float>,
float,
iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v10, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8> undef,
<vscale x 4 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v10, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
<vscale x 4 x i8> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8>,
<vscale x 8 x float>,
float,
iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v12, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8> undef,
<vscale x 8 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v12, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
<vscale x 8 x i8> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8>,
<vscale x 16 x float>,
float,
iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v16, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv2r.v v8, v16
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8> undef,
<vscale x 16 x float> %0,
float %1,
iXLen 0, iXLen %2)

ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v16, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
<vscale x 16 x i8> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
iXLen 0, iXLen %4, iXLen 1)

ret <vscale x 16 x i8> %a
}
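The unsigned form pairs naturally with masking. The following hedged C sketch shows one plausible masked use, assuming a masked overloaded intrinsic named __riscv_sf_vfnrclip_xu_f_qf_m (mirroring the usual _m suffix convention) and the same rounding-mode reading of the integer operand as above; the helper and its behavior for inactive lanes are illustrative only.

// Hypothetical masked use (assumes __riscv_sf_vfnrclip_xu_f_qf_m exists):
// clip only lanes whose input is nonnegative, so the unsigned result is
// meaningful; inactive destination lanes follow the default mask policy.
#include <sifive_vector.h>
#include <stddef.h>
#include <stdint.h>

void quantize_nonneg_f32_to_u8(const float *src, uint8_t *dst, float scale,
                               size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m2(n - i);
    vfloat32m2_t v = __riscv_vle32_v_f32m2(src + i, vl);
    vbool16_t nonneg = __riscv_vmfge_vf_f32m2_b16(v, 0.0f, vl);
    vuint8mf2_t q = __riscv_sf_vfnrclip_xu_f_qf_m(nonneg, v, scale, 0, vl);
    __riscv_vse8_v_u8mf2_m(nonneg, dst + i, q, vl); // masked store
    i += vl;
  }
}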
33 changes: 33 additions & 0 deletions llvm/test/MC/RISCV/rvv/xsfvfnrclip.s
@@ -0,0 +1,33 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v,+xsfvfnrclipxfqf %s \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
# RUN: | llvm-objdump -d --mattr=+v,+xsfvfnrclipxfqf - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

sf.vfnrclip.xu.f.qf v4, v8, fa2
# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2
# CHECK-ENCODING: [0x5b,0x52,0x86,0x8a]
# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
# CHECK-UNKNOWN: 5b 52 86 8a <unknown>

sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
# CHECK-ENCODING: [0x5b,0x52,0x86,0x88]
# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
# CHECK-UNKNOWN: 5b 52 86 88 <unknown>

sf.vfnrclip.x.f.qf v4, v8, fa2
# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2
# CHECK-ENCODING: [0x5b,0x52,0x86,0x8e]
# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
# CHECK-UNKNOWN: 5b 52 86 8e <unknown>

sf.vfnrclip.x.f.qf v4, v8, fa2, v0.t
# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2, v0.t
# CHECK-ENCODING: [0x5b,0x52,0x86,0x8c]
# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
# CHECK-UNKNOWN: 5b 52 86 8c <unknown>
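The four CHECK-ENCODING byte sequences above differ in exactly two bits. Reassembling the little-endian bytes into 32-bit words makes this visible; the C sketch below just copies the word values from the encodings above, and the bit-position reading (vm at bit 25, funct6 starting at bit 26) is an observation against the standard RVV layout, not a spec claim.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Little-endian bytes [0x5b,0x52,0x86,0x8a] etc. reassembled as words.
  uint32_t xu      = 0x8a86525b; // sf.vfnrclip.xu.f.qf v4, v8, fa2
  uint32_t xu_mask = 0x8886525b; // sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
  uint32_t x       = 0x8e86525b; // sf.vfnrclip.x.f.qf  v4, v8, fa2
  // Masked vs. unmasked flips only bit 25 (the vm field).
  printf("vm (mask) bit:  0x%08x\n", (unsigned)(xu ^ xu_mask)); // 0x02000000
  // Signed vs. unsigned flips only bit 26 (low bit of funct6).
  printf("x vs xu funct6: 0x%08x\n", (unsigned)(xu ^ x));       // 0x04000000
  return 0;
}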
1 change: 1 addition & 0 deletions llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -714,6 +714,7 @@ R"(All available -march extensions for RISC-V
xcvsimd 1.0
xsfcie 1.0
xsfvcp 1.0
xsfvfnrclipxfqf 1.0
xsfvfwmaccqqq 1.0
xsfvqmaccdod 1.0
xsfvqmaccqoq 1.0