diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
index 1acdc7f06e3c6..5428dbcc9f187 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
@@ -1,32 +1,32 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare i32 @llvm.riscv.vfirst.i64.nxv1i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv1i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv1i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
     i32 %2)
@@ -34,33 +34,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv2i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv2i1(
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv2i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv2i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
     i32 %2)
@@ -68,33 +68,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv4i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv4i1(
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv4i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv4i1
 ; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
     i32 %2)
@@ -102,33 +102,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv8i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv8i1(
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv8i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv8i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
     i32 %2)
@@ -136,33 +136,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv16i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv16i1(
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv16i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv16i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
     i32 %2)
@@ -170,33 +170,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv32i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv32i1(
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv32i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv32i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
     i32 %2)
@@ -204,33 +204,33 @@ entry:
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv64i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv64i1(
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv64i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv64i1
 ; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
index 521c6b4ccbf9d..b3a095d34572d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -1,419 +1,419 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
     i32 %1)
 
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
     i32 %1)
 
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
     i32 %1)
 
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
     i32 %1)
 
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
     i32 %1)
 
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
     i32 %1)
 
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32
 ; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
     i32 %1)
 
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
     i32 %1)
 
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
     i32 %1)
 
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
     i32 %1)
 
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
     i32 %1)
 
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
   double,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
     i32 %1)
 
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
   double,
   i32);
 
-define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
     i32 %1)
 
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
   double,
   i32);
 
-define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
     i32 %1)
 
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
   double,
   i32);
 
-define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
     i32 %1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i32 %0) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16_f16(i32 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16_f16(i32 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16_f16(i32 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16_f16(i32 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16_f16(i32 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16
 ; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32_f32(i32 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32
 ; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32_f32(i32 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32_f32(i32 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32_f32(i32 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32_f32(i32 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32
 ; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64_f64(i32 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64_f64(i32 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64_f64(i32 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64_f64(i32 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64
 ; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK: vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
     i32 %0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
index 525fa09275262..f781c1f0397bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -1,419 +1,419 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
     i64 %1)
 
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
   half,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16
 ; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
     i64 %1)
 
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
   half,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16
 ; CHECK:
vsetvli {{.*}}, a0, e16,m1,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv4f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f16.f16( +declare @llvm.riscv.vfmv.v.f.nxv8f16( half, i64); -define @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16 ; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv8f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f16.f16( +declare @llvm.riscv.vfmv.v.f.nxv16f16( half, i64); -define @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16 ; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv16f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv32f16.f16( +declare @llvm.riscv.vfmv.v.f.nxv32f16( half, i64); -define @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16 ; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv32f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv32f16( half %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f32.f32( +declare @llvm.riscv.vfmv.v.f.nxv1f32( float, i64); -define @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32 ; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv1f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f32.f32( +declare @llvm.riscv.vfmv.v.f.nxv2f32( float, i64); -define @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32 ; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f32.f32( +declare @llvm.riscv.vfmv.v.f.nxv4f32( float, i64); -define @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32 ; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f32.f32( +declare @llvm.riscv.vfmv.v.f.nxv8f32( float, i64); -define @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f32(float 
%0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32 ; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv8f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f32.f32( +declare @llvm.riscv.vfmv.v.f.nxv16f32( float, i64); -define @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32 ; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f64.f64( +declare @llvm.riscv.vfmv.v.f.nxv1f64( double, i64); -define @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64 ; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f64.f64( +declare @llvm.riscv.vfmv.v.f.nxv2f64( double, i64); -define @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64 ; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f64.f64( +declare @llvm.riscv.vfmv.v.f.nxv4f64( double, i64); -define @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64 ; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double %0, i64 %1) ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f64.f64( +declare @llvm.riscv.vfmv.v.f.nxv8f64( double, i64); -define @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i64 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64 +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64 ; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu ; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 - %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double %0, i64 %1) ret %a } -define @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i64 %0) nounwind { +define @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16 ; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv1f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv1f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv2f16_f16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv2f16(i64 %0) nounwind { entry: -; CHECK-LABEL: 
intrinsic_vmv.v.x_zero_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16 ; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv2f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv2f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv4f16_f16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv4f16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16 ; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv4f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv8f16_f16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv8f16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16 ; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv8f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv16f16_f16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv16f16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16 ; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv16f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv32f16_f16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv32f16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16 ; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv32f16.f16( + %a = call @llvm.riscv.vfmv.v.f.nxv32f16( half 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv1f32_f32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv1f32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32 ; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv1f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv2f32_f32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv2f32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32 ; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv4f32_f32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv4f32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32 ; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv8f32_f32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv8f32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32 ; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call 
@llvm.riscv.vfmv.v.f.nxv8f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv16f32_f32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv16f32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32 ; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv1f64_f64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv1f64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64 ; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv2f64_f64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv2f64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64 ; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv4f64_f64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv4f64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64 ; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double 0.0, i64 %0) ret %a } -define @intrinsic_vmv.v.x_zero_nxv8f64_f64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_zero_nxv8f64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64 +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64 ; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu ; CHECK: vmv.v.x {{v[0-9]+}}, zero - %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double 0.0, i64 %0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll index 6d2fa56686f86..6be0bf7e92063 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( +declare 
@llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll index 2977a633a4a37..dd02b52375113 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare 
@llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( +declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( + %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( +declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( 
, , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( + %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( +declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( + %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( +declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( + %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll index fefa2e5de06a9..4445dfd8057ec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ 
entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll index 
60da063f56985..4173a465c7b53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call 
@llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( +declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( + %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( +declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( + %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( +declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( + %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare 
@llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( +declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( + %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll index e50e92a6bd20e..b3d86138d2c29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t 
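; A sketch of the rename this patch applies to every masked reduction test,
; with the angle-bracket types reconstructed from the overload suffixes (the
; operand roles are an assumption based on the usual RVV reduction-intrinsic
; layout, not text taken from this patch):
;
;   declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
;     <vscale x 4 x half>,   ; merge/maskedoff destination
;     <vscale x 32 x half>,  ; source vector being reduced
;     <vscale x 4 x half>,   ; scalar accumulator in element 0
;     <vscale x 32 x i1>,    ; mask, fully implied by the nxv32f16 source
;     i32);                  ; vl
;
; Since a <vscale x 32 x half> source can only pair with a <vscale x 32 x i1>
; mask, the trailing .nxv32i1 suffix carried no extra information, and it is
; dropped from the overload name below.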
- %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll index 948ad9f18da1e..c7e7f5b2463ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; 
CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( +declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( + %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( +declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( + %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( +declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( + %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( +declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( + %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll index 87393c291ca87..6747a66556128 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll @@ -20,7 +20,7 
@@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( +declare 
@llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll index 7000191c9a644..6079d76899272 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call 
@llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare 
@llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1( +declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1( + %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1( +declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1( + %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1( +declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1( + %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1( +declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1( + %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll index 943f2009181cc..e535188f38b69 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll @@ -1,16 +1,16 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f16( +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , i32); -define @intrinsic_vfwadd_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f16( + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( %0, %1, i32 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.mask.nxv1f16( +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( , , , , i32); -define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f16( + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f16( +declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( , , i32); -define @intrinsic_vfwadd_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f16( + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( %0, %1, i32 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f16( +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( , , , , i32); -define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f16( + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f16( +declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( , , i32); -define @intrinsic_vfwadd_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv4f16( + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( %0, %1, i32 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f16( +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i32); -define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f16( + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f16( +declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( , , i32); -define @intrinsic_vfwadd_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( 
%0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv8f16( + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( %0, %1, i32 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f16( +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i32); -define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f16( + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f16( +declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( , , i32); -define @intrinsic_vfwadd_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv16f16( + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( %0, %1, i32 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f16( +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i32); -define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv16f16( + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,177 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f16.f16( +declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { 
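; Spelled out with full types (a reconstruction from the overload suffix and
; the surrounding CHECK lines; treat it as a sketch, not patch text), the new
; f64 case added here is:
;
;   declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
;     <vscale x 2 x float>,
;     <vscale x 2 x float>,
;     i32);
;
; vfwadd.vv is a widening add: both sources are SEW-wide and the result is
; 2*SEW, so the CHECK lines select e32 (the source width, here at m1) even
; though the values produced are doubles. This is also why the RUN line for
; this file now requires +d instead of +f.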
+entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( , half, i32); -define @intrinsic_vfwadd_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f16.f16( + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( %0, half %1, i32 %2) @@ -218,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( , , half, , i32); -define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 +; CHECK-LABEL: 
intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -240,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f16.f16( +declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( , half, i32); -define @intrinsic_vfwadd_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f16.f16( + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( %0, half %1, i32 %2) @@ -258,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( , , half, , i32); -define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -280,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f16.f16( +declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( , half, i32); -define @intrinsic_vfwadd_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv4f16.f16( + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( %0, half %1, i32 %2) @@ -298,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( , , half, , i32); -define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -320,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f16.f16( +declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( , half, i32); -define @intrinsic_vfwadd_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv8f16.f16( + %a = call 
@llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( %0, half %1, i32 %2) @@ -338,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( , , half, , i32); -define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -360,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f16.f16( +declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( , half, i32); -define @intrinsic_vfwadd_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv16f16.f16( + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( %0, half %1, i32 %2) @@ -378,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( , , half, , i32); -define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -399,3 +559,163 @@ entry: ret %a } + +declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( + , + , + 
float, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll index 3e7fb6474b30d..9f926fdb219c2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f16( +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , i64); -define @intrinsic_vfwadd_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f16( + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( %0, %1, i64 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f16( +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { 
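; The masked variant, sketched with reconstructed types (the per-operand
; comments follow the standard RVV masked-intrinsic layout and are an
; assumption, not wording from this patch):
;
;   declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
;     <vscale x 1 x float>,  ; merge: supplies elements where the mask is 0
;     <vscale x 1 x half>,   ; op1
;     <vscale x 1 x half>,   ; op2
;     <vscale x 1 x i1>,     ; mask, passed in v0
;     i64);                  ; vl (i64 in the rv64 variants of these tests)
;
; Preserving the merge operand is presumably why the masked CHECK lines below
; expect the undisturbed policy "tu,mu" where the unmasked tests use "ta,mu".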
entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f16( + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f16( +declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( , , i64); -define @intrinsic_vfwadd_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f16( + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( %0, %1, i64 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f16( +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f16( + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f16( +declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( , , i64); -define @intrinsic_vfwadd_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv4f16( + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( %0, %1, i64 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f16( +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f16( + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f16( +declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( , , i64); -define @intrinsic_vfwadd_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call 
@llvm.riscv.vfwadd.nxv8f16( + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( %0, %1, i64 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f16( +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f16( + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f16( +declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( , , i64); -define @intrinsic_vfwadd_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv16f16( + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( %0, %1, i64 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f16( +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv16f16( + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32( +declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( , , i64); -define @intrinsic_vfwadd_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f32( + %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( %0, %1, i64 %2) @@ -218,19 +218,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32( +declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f32( + %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( %0, %1, %2, @@ -240,17 +240,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32( 
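; Why the three-part suffix (an inference from the rename pattern, not a
; rationale stated in the patch): for a widening op the old one-type suffix
; is ambiguous, since the source and result vector types differ. The new
; names spell out result.op1.op2 explicitly:
;
;   old: @llvm.riscv.vfwadd.nxv2f32                  ; source type only
;   new: @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32  ; result.op1.op2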
+declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( , , i64); -define @intrinsic_vfwadd_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f32( + %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( %0, %1, i64 %2) @@ -258,19 +258,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32( +declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f32( + %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( %0, %1, %2, @@ -280,17 +280,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32( +declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( , , i64); -define @intrinsic_vfwadd_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv4f32( + %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( %0, %1, i64 %2) @@ -298,19 +298,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32( +declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f32( + %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( %0, %1, %2, @@ -320,17 +320,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32( +declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( , , i64); -define @intrinsic_vfwadd_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv8f32( + %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( %0, %1, i64 %2) @@ -338,19 +338,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32( +declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( , , , , i64); -define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; 
CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f32( + %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( %0, %1, %2, @@ -360,17 +360,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f16.f16( +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( , half, i64); -define @intrinsic_vfwadd_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f16.f16( + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( %0, half %1, i64 %2) @@ -378,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( , , half, , i64); -define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -400,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f16.f16( +declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( , half, i64); -define @intrinsic_vfwadd_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f16.f16( + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( %0, half %1, i64 %2) @@ -418,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( , , half, , i64); -define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -440,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f16.f16( +declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( , half, i64); -define @intrinsic_vfwadd_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call 
@llvm.riscv.vfwadd.nxv4f16.f16( + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( %0, half %1, i64 %2) @@ -458,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( , , half, , i64); -define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -480,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f16.f16( +declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( , half, i64); -define @intrinsic_vfwadd_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv8f16.f16( + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( %0, half %1, i64 %2) @@ -498,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( , , half, , i64); -define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -520,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f16.f16( +declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( , half, i64); -define @intrinsic_vfwadd_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv16f16.f16( + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( %0, half %1, i64 %2) @@ -538,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f16.f16( +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( , , half, , i64); -define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -560,17 +560,17 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.nxv1f32.f32( +declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( , float, i64); -define @intrinsic_vfwadd_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv1f32.f32( + %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( %0, float %1, i64 %2) @@ -578,19 +578,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.f32( +declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( , , float, , i64); -define @intrinsic_vfwadd_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv1f32.f32( + %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( %0, %1, float %2, @@ -600,17 +600,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.f32( +declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( , float, i64); -define @intrinsic_vfwadd_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv2f32.f32( + %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( %0, float %1, i64 %2) @@ -618,19 +618,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.f32( +declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( , , float, , i64); -define @intrinsic_vfwadd_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv2f32.f32( + %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( %0, %1, float %2, @@ -640,17 +640,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.f32( +declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( , float, i64); -define @intrinsic_vfwadd_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv4f32.f32( + %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( %0, float %1, i64 %2) @@ -658,19 +658,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.f32( +declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( , , float, , i64); -define @intrinsic_vfwadd_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define 
@intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv4f32.f32( + %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( %0, %1, float %2, @@ -680,17 +680,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.f32( +declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( , float, i64); -define @intrinsic_vfwadd_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwadd.nxv8f32.f32( + %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( %0, float %1, i64 %2) @@ -698,19 +698,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.f32( +declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( , , float, , i64); -define @intrinsic_vfwadd_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.mask.nxv8f32.f32( + %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( %0, %1, float %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll index fae0b94a6c0bb..ff391c882fc90 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -1,16 +1,16 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f16( +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , , i32); -define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv1f16( + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( %0, %1, i32 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f16( +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( , , , , i32); -define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( + %a = call 
@llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f16( +declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( , , i32); -define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv2f16( + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( %0, %1, i32 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f16( +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( , , , , i32); -define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f16( +declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( , , i32); -define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv4f16( + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( %0, %1, i32 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f16( +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( , , , , i32); -define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f16( +declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( , , i32); -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv8f16( + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %0, %1, i32 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f16( +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( , , , , i32); -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) 
nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f16( +declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( , , i32); -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv16f16( + %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( %0, %1, i32 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f16( +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( , , , , i32); -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( %0, %1, %2, @@ -200,14 +200,174 @@ entry: ret %a } +declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + declare @llvm.riscv.vfwadd.w.nxv1f32.f16( , half, i32); -define @intrinsic_vfwadd.w_wf_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( @@ -225,9 +385,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( , i32); -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -245,9 +405,9 @@ declare @llvm.riscv.vfwadd.w.nxv2f32.f16( half, i32); -define @intrinsic_vfwadd.w_wf_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( @@ -265,9 +425,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( , i32); -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 
%4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -285,9 +445,9 @@ declare @llvm.riscv.vfwadd.w.nxv4f32.f16( half, i32); -define @intrinsic_vfwadd.w_wf_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( @@ -305,9 +465,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( , i32); -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -325,9 +485,9 @@ declare @llvm.riscv.vfwadd.w.nxv8f32.f16( half, i32); -define @intrinsic_vfwadd.w_wf_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( @@ -345,9 +505,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( , i32); -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -365,9 +525,9 @@ declare @llvm.riscv.vfwadd.w.nxv16f32.f16( half, i32); -define @intrinsic_vfwadd.w_wf_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( @@ -385,9 +545,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( , i32); -define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -399,3 +559,163 @@ entry: ret %a } + +declare @llvm.riscv.vfwadd.w.nxv1f64.f32( + , + float, + i32); + +define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f64.f32( + , + float, + i32); + +define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f64.f32( + , + float, + i32); + +define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f64.f32( + , + float, + i32); + +define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfwadd.w.mask.nxv8f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll index 02ab0ad81baf1..bc108823ce6f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f16( +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , , i64); -define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv1f16( + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( %0, %1, i64 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f16( +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f16( +declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( , , i64); -define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv2f16( + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( %0, %1, i64 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f16( +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f16( +declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( , , i64); -define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv4f16( + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( %0, %1, i64 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f16( +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f16( +declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( , , i64); -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv8f16( + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %0, %1, i64 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f16( +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f16( +declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( , , i64); -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv16f16( + %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( %0, %1, i64 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f16( +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( %0, 
%1, %2, @@ -200,17 +200,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32( +declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( , , i64); -define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv1f32( + %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( %0, %1, i64 %2) @@ -218,19 +218,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32( +declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32( + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( %0, %1, %2, @@ -240,17 +240,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32( +declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( , , i64); -define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv2f32( + %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( %0, %1, i64 %2) @@ -258,19 +258,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32( +declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32( + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( %0, %1, %2, @@ -280,17 +280,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32( +declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( , , i64); -define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv4f32( + %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( %0, %1, i64 %2) @@ -298,19 +298,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32( +declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32( + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( %0, %1, %2, @@ -320,17 +320,17 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32( +declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( , , i64); -define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwadd.w.nxv8f32( + %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( %0, %1, i64 %2) @@ -338,19 +338,19 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32( +declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( , , , , i64); -define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32( + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( %0, %1, %2, @@ -365,9 +365,9 @@ declare @llvm.riscv.vfwadd.w.nxv1f32.f16( half, i64); -define @intrinsic_vfwadd.w_wf_nxv1f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( @@ -385,9 +385,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -405,9 +405,9 @@ declare @llvm.riscv.vfwadd.w.nxv2f32.f16( half, i64); -define @intrinsic_vfwadd.w_wf_nxv2f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( @@ -425,9 +425,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, 
%1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -445,9 +445,9 @@ declare @llvm.riscv.vfwadd.w.nxv4f32.f16( half, i64); -define @intrinsic_vfwadd.w_wf_nxv4f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( @@ -465,9 +465,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -485,9 +485,9 @@ declare @llvm.riscv.vfwadd.w.nxv8f32.f16( half, i64); -define @intrinsic_vfwadd.w_wf_nxv8f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( @@ -505,9 +505,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -525,9 +525,9 @@ declare @llvm.riscv.vfwadd.w.nxv16f32.f16( half, i64); -define @intrinsic_vfwadd.w_wf_nxv16f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( @@ -545,9 +545,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -565,9 +565,9 @@ declare 
@llvm.riscv.vfwadd.w.nxv1f64.f32( float, i64); -define @intrinsic_vfwadd.w_wf_nxv1f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( @@ -585,9 +585,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( @@ -605,9 +605,9 @@ declare @llvm.riscv.vfwadd.w.nxv2f64.f32( float, i64); -define @intrinsic_vfwadd.w_wf_nxv2f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( @@ -625,9 +625,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( @@ -645,9 +645,9 @@ declare @llvm.riscv.vfwadd.w.nxv4f64.f32( float, i64); -define @intrinsic_vfwadd.w_wf_nxv4f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( @@ -665,9 +665,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( @@ -685,9 +685,9 @@ declare @llvm.riscv.vfwadd.w.nxv8f64.f32( float, i64); -define @intrinsic_vfwadd.w_wf_nxv8f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; 
CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( @@ -705,9 +705,9 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( , i64); -define @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32 +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll index b4d3d4086811d..f5f0dae103d7b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll @@ -1,16 +1,16 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f16( +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( , , i32); -define @intrinsic_vfwmul_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f16( + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( %0, %1, i32 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f16( +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( , , , , i32); -define @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f16( + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f16( +declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( , , i32); -define @intrinsic_vfwmul_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f16( + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( %0, %1, i32 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f16( +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( , , , , i32); -define @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f16( + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f16( +declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( , , i32); -define @intrinsic_vfwmul_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv4f16( + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( %0, %1, i32 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f16( +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i32); -define @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f16( + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f16( +declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( , , i32); -define @intrinsic_vfwmul_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f16( + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( %0, %1, i32 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f16( +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i32); -define @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f16( + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f16( +declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( , , i32); -define @intrinsic_vfwmul_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv16f16( + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( %0, %1, i32 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmul.mask.nxv16f16( +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i32); -define @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv16f16( + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,177 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f16.f16( +declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( + , + , + i32); + +define 
@intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( , half, i32); -define @intrinsic_vfwmul_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f16.f16( + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( %0, half %1, i32 %2) @@ -218,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( , , half, , i32); -define @intrinsic_vfwmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -240,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f16.f16( +declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( , half, i32); -define @intrinsic_vfwmul_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f16.f16( + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( %0, half %1, i32 %2) @@ -258,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( , , half, , i32); -define @intrinsic_vfwmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -280,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f16.f16( +declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( , half, i32); 
-define @intrinsic_vfwmul_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv4f16.f16( + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( %0, half %1, i32 %2) @@ -298,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( , , half, , i32); -define @intrinsic_vfwmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -320,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f16.f16( +declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( , half, i32); -define @intrinsic_vfwmul_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f16.f16( + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( %0, half %1, i32 %2) @@ -338,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( , , half, , i32); -define @intrinsic_vfwmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -360,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f16.f16( +declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( , half, i32); -define @intrinsic_vfwmul_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv16f16.f16( + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( %0, half %1, i32 %2) @@ -378,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( , , half, , i32); -define @intrinsic_vfwmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -399,3 +559,163 @@ entry: ret %a } + +declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll index f84739475ee6e..81d432b8f3998 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f16( +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( , , i64); -define @intrinsic_vfwmul_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f16( + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( %0, %1, i64 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f16( +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f16( + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f16( +declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( , , i64); -define @intrinsic_vfwmul_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f16( + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( %0, %1, i64 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f16( +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f16( + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f16( 
+declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( , , i64); -define @intrinsic_vfwmul_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv4f16( + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( %0, %1, i64 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f16( +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f16( + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f16( +declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( , , i64); -define @intrinsic_vfwmul_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f16( + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( %0, %1, i64 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f16( +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f16( + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f16( +declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( , , i64); -define @intrinsic_vfwmul_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv16f16( + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( %0, %1, i64 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f16( +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) 
nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv16f16( + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32( +declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( , , i64); -define @intrinsic_vfwmul_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f32( + %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( %0, %1, i64 %2) @@ -218,19 +218,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32( +declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f32( + %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( %0, %1, %2, @@ -240,17 +240,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32( +declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( , , i64); -define @intrinsic_vfwmul_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f32( + %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( %0, %1, i64 %2) @@ -258,19 +258,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32( +declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f32( + %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( %0, %1, %2, @@ -280,17 +280,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32( +declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( , , i64); -define @intrinsic_vfwmul_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a 
= call @llvm.riscv.vfwmul.nxv4f32( + %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( %0, %1, i64 %2) @@ -298,19 +298,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32( +declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f32( + %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( %0, %1, %2, @@ -320,17 +320,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32( +declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( , , i64); -define @intrinsic_vfwmul_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f32( + %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( %0, %1, i64 %2) @@ -338,19 +338,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32( +declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( , , , , i64); -define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f32( + %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( %0, %1, %2, @@ -360,17 +360,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f16.f16( +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( , half, i64); -define @intrinsic_vfwmul_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f16.f16( + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( %0, half %1, i64 %2) @@ -378,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( , , half, , i64); -define @intrinsic_vfwmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -400,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f16.f16( +declare 
@llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( , half, i64); -define @intrinsic_vfwmul_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f16.f16( + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( %0, half %1, i64 %2) @@ -418,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( , , half, , i64); -define @intrinsic_vfwmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -440,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f16.f16( +declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( , half, i64); -define @intrinsic_vfwmul_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv4f16.f16( + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( %0, half %1, i64 %2) @@ -458,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( , , half, , i64); -define @intrinsic_vfwmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -480,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f16.f16( +declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( , half, i64); -define @intrinsic_vfwmul_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f16.f16( + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( %0, half %1, i64 %2) @@ -498,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( , , half, , i64); -define @intrinsic_vfwmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { 
entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -520,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f16.f16( +declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( , half, i64); -define @intrinsic_vfwmul_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv16f16.f16( + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( %0, half %1, i64 %2) @@ -538,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f16.f16( +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( , , half, , i64); -define @intrinsic_vfwmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -560,17 +560,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.f32( +declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( , float, i64); -define @intrinsic_vfwmul_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv1f32.f32( + %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( %0, float %1, i64 %2) @@ -578,19 +578,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.f32( +declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( , , float, , i64); -define @intrinsic_vfwmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv1f32.f32( + %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( %0, %1, float %2, @@ -600,17 +600,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.f32( +declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( , float, i64); -define @intrinsic_vfwmul_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, 
{{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv2f32.f32( + %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( %0, float %1, i64 %2) @@ -618,19 +618,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.f32( +declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( , , float, , i64); -define @intrinsic_vfwmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv2f32.f32( + %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( %0, %1, float %2, @@ -640,17 +640,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.f32( +declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( , float, i64); -define @intrinsic_vfwmul_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv4f32.f32( + %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( %0, float %1, i64 %2) @@ -658,19 +658,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.f32( +declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( , , float, , i64); -define @intrinsic_vfwmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv4f32.f32( + %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( %0, %1, float %2, @@ -680,17 +680,17 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.f32( +declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( , float, i64); -define @intrinsic_vfwmul_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwmul.nxv8f32.f32( + %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( %0, float %1, i64 %2) @@ -698,19 +698,19 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.f32( +declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( , , float, , i64); -define @intrinsic_vfwmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwmul.mask.nxv8f32.f32( + %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( %0, %1, float %2, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
index 91b86cf4058b4..2ff4a0e85bcbf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
index 1a9b1cde8226a..9ba4bf1f56365 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv16i1(
+declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv16i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
index dcc16b1b07ea3..0ee095224c949 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
index ce2eb047c1b0a..48041740443d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv16i1(
+declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv16i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
index e54016962967c..40af29b0e9810 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+  %a = call <vscale x 2 x float>
@llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f16( +declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( , , i32); -define @intrinsic_vfwsub_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f16( + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( %0, %1, i32 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f16( +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i32); -define @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f16( + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f16( +declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( , , i32); -define @intrinsic_vfwsub_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f16( + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( %0, %1, i32 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f16( +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i32); -define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f16( + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f16( +declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( , , i32); -define @intrinsic_vfwsub_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv16f16( + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( %0, %1, i32 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f16( +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i32); -define 
@intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv16f16( + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,177 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f16.f16( +declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( , half, i32); -define @intrinsic_vfwsub_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv1f16.f16( + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( %0, half %1, i32 %2) @@ -218,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( , , half, , i32); -define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -240,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f16.f16( +declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( , half, i32); -define @intrinsic_vfwsub_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv2f16.f16( + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( %0, half %1, i32 %2) @@ -258,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( , , half, , i32); -define @intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -280,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f16.f16( +declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( , half, i32); -define @intrinsic_vfwsub_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 
%2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f16.f16( + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( %0, half %1, i32 %2) @@ -298,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( , , half, , i32); -define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -320,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f16.f16( +declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( , half, i32); -define @intrinsic_vfwsub_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f16.f16( + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( %0, half %1, i32 %2) @@ -338,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( , , half, , i32); -define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -360,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f16.f16( +declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( , half, i32); -define @intrinsic_vfwsub_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv16f16.f16( + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( %0, half %1, i32 %2) @@ -378,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( , , half, , i32); -define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; 
CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -399,3 +559,163 @@ entry: ret %a } + +declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll index 9226f32165321..a3c0f7ca71b06 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f16( +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( , , i64); -define @intrinsic_vfwsub_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv1f16( + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( %0, %1, i64 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f16( +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv1f16( + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f16( +declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( , , i64); -define @intrinsic_vfwsub_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv2f16( + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( %0, %1, i64 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f16( +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv2f16( + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f16( +declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( , , i64); -define @intrinsic_vfwsub_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define 
@intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f16( + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( %0, %1, i64 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f16( +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f16( + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f16( +declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( , , i64); -define @intrinsic_vfwsub_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f16( + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( %0, %1, i64 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f16( +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f16( + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f16( +declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( , , i64); -define @intrinsic_vfwsub_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv16f16( + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( %0, %1, i64 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f16( +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16 ; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv16f16( + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( %0, %1, %2, @@ -200,17 +200,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32( +declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( , , i64); -define @intrinsic_vfwsub_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv1f32( + %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( %0, %1, i64 %2) @@ -218,19 +218,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32( +declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv1f32( + %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( %0, %1, %2, @@ -240,17 +240,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32( +declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( , , i64); -define @intrinsic_vfwsub_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv2f32( + %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( %0, %1, i64 %2) @@ -258,19 +258,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32( +declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv2f32( + %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( %0, %1, %2, @@ -280,17 +280,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32( +declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( , , i64); -define @intrinsic_vfwsub_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f32( + %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( %0, %1, i64 %2) @@ -298,19 +298,19 @@ entry: ret %a } 
-declare @llvm.riscv.vfwsub.mask.nxv4f32( +declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f32( + %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( %0, %1, %2, @@ -320,17 +320,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32( +declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( , , i64); -define @intrinsic_vfwsub_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f32( + %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( %0, %1, i64 %2) @@ -338,19 +338,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32( +declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( , , , , i64); -define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f32( + %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( %0, %1, %2, @@ -360,17 +360,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f16.f16( +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( , half, i64); -define @intrinsic_vfwsub_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv1f16.f16( + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( %0, half %1, i64 %2) @@ -378,19 +378,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( , , half, , i64); -define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( %0, %1, half %2, @@ -400,17 +400,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f16.f16( +declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( , half, i64); -define @intrinsic_vfwsub_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define 
@intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv2f16.f16( + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( %0, half %1, i64 %2) @@ -418,19 +418,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( , , half, , i64); -define @intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( %0, %1, half %2, @@ -440,17 +440,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f16.f16( +declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( , half, i64); -define @intrinsic_vfwsub_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f16.f16( + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( %0, half %1, i64 %2) @@ -458,19 +458,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( , , half, , i64); -define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( %0, %1, half %2, @@ -480,17 +480,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f16.f16( +declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( , half, i64); -define @intrinsic_vfwsub_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f16.f16( + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( %0, half %1, i64 %2) @@ -498,19 +498,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( , , half, , i64); -define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16 ; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( %0, %1, half %2, @@ -520,17 +520,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f16.f16( +declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( , half, i64); -define @intrinsic_vfwsub_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv16f16.f16( + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( %0, half %1, i64 %2) @@ -538,19 +538,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f16.f16( +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( , , half, , i64); -define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( %0, %1, half %2, @@ -560,17 +560,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.f32( +declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( , float, i64); -define @intrinsic_vfwsub_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv1f32.f32( + %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( %0, float %1, i64 %2) @@ -578,19 +578,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.f32( +declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( , , float, , i64); -define @intrinsic_vfwsub_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv1f32.f32( + %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( %0, %1, float %2, @@ -600,17 +600,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.f32( +declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( , float, i64); -define @intrinsic_vfwsub_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv2f32.f32( + %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( %0, float %1, i64 
%2) @@ -618,19 +618,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.f32( +declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( , , float, , i64); -define @intrinsic_vfwsub_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv2f32.f32( + %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( %0, %1, float %2, @@ -640,17 +640,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.f32( +declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( , float, i64); -define @intrinsic_vfwsub_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv4f32.f32( + %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( %0, float %1, i64 %2) @@ -658,19 +658,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.f32( +declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( , , float, , i64); -define @intrinsic_vfwsub_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv4f32.f32( + %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( %0, %1, float %2, @@ -680,17 +680,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.f32( +declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( , float, i64); -define @intrinsic_vfwsub_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} - %a = call @llvm.riscv.vfwsub.nxv8f32.f32( + %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( %0, float %1, i64 %2) @@ -698,19 +698,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.f32( +declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( , , float, , i64); -define @intrinsic_vfwsub_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32 +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.mask.nxv8f32.f32( + %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( %0, %1, float %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll index d50d1687a3522..5c770e2a6ad7d 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -1,16 +1,16 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f16( +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( , , i32); -define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv1f16( + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( %0, %1, i32 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f16( +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( , , , , i32); -define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f16( +declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( , , i32); -define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv2f16( + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( %0, %1, i32 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f16( +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( , , , , i32); -define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f16( +declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( , , i32); -define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv4f16( + %a = call 
@llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( %0, %1, i32 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f16( +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( , , , , i32); -define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f16( +declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( , , i32); -define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv8f16( + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( %0, %1, i32 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f16( +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( , , , , i32); -define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f16( +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( , , i32); -define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv16f16( + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( %0, %1, i32 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f16( +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( , , , , i32); -define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( %0, %1, %2, @@ -200,14 +200,174 @@ entry: ret %a } +declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( + , + , + i32); + +define 
@intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + declare @llvm.riscv.vfwsub.w.nxv1f32.f16( , half, i32); -define 
@intrinsic_vfwsub.w_wf_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( @@ -225,9 +385,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( , i32); -define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( @@ -245,9 +405,9 @@ declare @llvm.riscv.vfwsub.w.nxv2f32.f16( half, i32); -define @intrinsic_vfwsub.w_wf_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( @@ -265,9 +425,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( , i32); -define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( @@ -285,9 +445,9 @@ declare @llvm.riscv.vfwsub.w.nxv4f32.f16( half, i32); -define @intrinsic_vfwsub.w_wf_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( @@ -305,9 +465,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( , i32); -define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( @@ -325,9 +485,9 @@ declare @llvm.riscv.vfwsub.w.nxv8f32.f16( half, i32); -define @intrinsic_vfwsub.w_wf_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call 
@llvm.riscv.vfwsub.w.nxv8f32.f16( @@ -345,9 +505,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( , i32); -define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( @@ -365,9 +525,9 @@ declare @llvm.riscv.vfwsub.w.nxv16f32.f16( half, i32); -define @intrinsic_vfwsub.w_wf_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( @@ -385,9 +545,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( , i32); -define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( @@ -399,3 +559,163 @@ entry: ret %a } + +declare @llvm.riscv.vfwsub.w.nxv1f64.f32( + , + float, + i32); + +define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f64.f32( + , + float, + i32); + +define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f64.f32( + , + float, + i32); + +define 
@intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f64.f32( + , + float, + i32); + +define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( + , + , + float, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll index 43245ff0a18bf..ed9f4bfd7b4ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f16( +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( , , i64); -define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv1f16( + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( %0, %1, i64 %2) @@ -18,19 +18,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f16( +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( %0, %1, %2, @@ -40,17 +40,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f16( +declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( , , i64); -define 
@intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv2f16( + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( %0, %1, i64 %2) @@ -58,19 +58,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f16( +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( %0, %1, %2, @@ -80,17 +80,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f16( +declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( , , i64); -define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv4f16( + %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( %0, %1, i64 %2) @@ -98,19 +98,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f16( +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( %0, %1, %2, @@ -120,17 +120,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f16( +declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( , , i64); -define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv8f16( + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( %0, %1, i64 %2) @@ -138,19 +138,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f16( +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 +; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( %0, %1, %2, @@ -160,17 +160,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f16( +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( , , i64); -define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv16f16( + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( %0, %1, i64 %2) @@ -178,19 +178,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f16( +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( %0, %1, %2, @@ -200,17 +200,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32( +declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( , , i64); -define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv1f32( + %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( %0, %1, i64 %2) @@ -218,19 +218,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32( +declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32( + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( %0, %1, %2, @@ -240,17 +240,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32( +declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( , , i64); -define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv2f32( + %a = call 
@llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( %0, %1, i64 %2) @@ -258,19 +258,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32( +declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32( + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( %0, %1, %2, @@ -280,17 +280,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32( +declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( , , i64); -define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv4f32( + %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( %0, %1, i64 %2) @@ -298,19 +298,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32( +declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32( + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( %0, %1, %2, @@ -320,17 +320,17 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32( +declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( , , i64); -define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vfwsub.w.nxv8f32( + %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( %0, %1, i64 %2) @@ -338,19 +338,19 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32( +declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( , , , , i64); -define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32( + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( %0, %1, %2, @@ -365,9 +365,9 @@ declare @llvm.riscv.vfwsub.w.nxv1f32.f16( half, i64); -define @intrinsic_vfwsub.w_wf_nxv1f32_f16( %0, half %1, i64 %2) nounwind { +define 
@intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( @@ -385,9 +385,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( @@ -405,9 +405,9 @@ declare @llvm.riscv.vfwsub.w.nxv2f32.f16( half, i64); -define @intrinsic_vfwsub.w_wf_nxv2f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( @@ -425,9 +425,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( @@ -445,9 +445,9 @@ declare @llvm.riscv.vfwsub.w.nxv4f32.f16( half, i64); -define @intrinsic_vfwsub.w_wf_nxv4f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( @@ -465,9 +465,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( @@ -485,9 +485,9 @@ declare @llvm.riscv.vfwsub.w.nxv8f32.f16( half, i64); -define @intrinsic_vfwsub.w_wf_nxv8f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16( @@ -505,9 +505,9 @@ declare 
@llvm.riscv.vfwsub.w.mask.nxv8f32.f16( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( @@ -525,9 +525,9 @@ declare @llvm.riscv.vfwsub.w.nxv16f32.f16( half, i64); -define @intrinsic_vfwsub.w_wf_nxv16f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( @@ -545,9 +545,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( @@ -565,9 +565,9 @@ declare @llvm.riscv.vfwsub.w.nxv1f64.f32( float, i64); -define @intrinsic_vfwsub.w_wf_nxv1f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( @@ -585,9 +585,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( @@ -605,9 +605,9 @@ declare @llvm.riscv.vfwsub.w.nxv2f64.f32( float, i64); -define @intrinsic_vfwsub.w_wf_nxv2f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( @@ -625,9 +625,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32 ; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( @@ -645,9 +645,9 @@ declare @llvm.riscv.vfwsub.w.nxv4f64.f32( float, i64); -define @intrinsic_vfwsub.w_wf_nxv4f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32( @@ -665,9 +665,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( @@ -685,9 +685,9 @@ declare @llvm.riscv.vfwsub.w.nxv8f64.f32( float, i64); -define @intrinsic_vfwsub.w_wf_nxv8f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( @@ -705,9 +705,9 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( , i64); -define @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_f32 +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll index 63218bfebb657..761470d6a7e2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmadc.nxv1i1.nxv1i8( +declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( , , i32); @@ -10,7 +10,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmadc.nxv1i8.nxv1i8( %0, %1, i32 %2) @@ -18,7 +18,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i8( +declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( , , i32); @@ -28,7 +28,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmadc.nxv2i8.nxv2i8( %0, %1, i32 %2) @@ -36,7 +36,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i8( +declare 
@llvm.riscv.vmadc.nxv4i8.nxv4i8( , , i32); @@ -46,7 +46,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i8( + %a = call @llvm.riscv.vmadc.nxv4i8.nxv4i8( %0, %1, i32 %2) @@ -54,7 +54,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i8( +declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( , , i32); @@ -64,7 +64,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i8( + %a = call @llvm.riscv.vmadc.nxv8i8.nxv8i8( %0, %1, i32 %2) @@ -72,7 +72,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i8( +declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( , , i32); @@ -82,7 +82,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i8( + %a = call @llvm.riscv.vmadc.nxv16i8.nxv16i8( %0, %1, i32 %2) @@ -90,7 +90,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.nxv32i8( +declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( , , i32); @@ -100,7 +100,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i8( + %a = call @llvm.riscv.vmadc.nxv32i8.nxv32i8( %0, %1, i32 %2) @@ -108,7 +108,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i1.nxv64i8( +declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( , , i32); @@ -118,7 +118,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv64i1.nxv64i8( + %a = call @llvm.riscv.vmadc.nxv64i8.nxv64i8( %0, %1, i32 %2) @@ -126,7 +126,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.nxv1i16( +declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( , , i32); @@ -136,7 +136,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i16( + %a = call @llvm.riscv.vmadc.nxv1i16.nxv1i16( %0, %1, i32 %2) @@ -144,7 +144,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i16( +declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( , , i32); @@ -154,7 +154,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i16( + %a = call @llvm.riscv.vmadc.nxv2i16.nxv2i16( %0, %1, i32 %2) @@ -162,7 +162,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i16( +declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( , , i32); @@ -172,7 +172,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i16( + %a = call @llvm.riscv.vmadc.nxv4i16.nxv4i16( %0, %1, i32 %2) @@ -180,7 +180,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i16( +declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( , , i32); @@ -190,7 +190,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i16( + %a = call @llvm.riscv.vmadc.nxv8i16.nxv8i16( %0, %1, i32 %2) @@ -198,7 +198,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i16( +declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( , , i32); @@ -208,7 +208,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i16( + %a = call @llvm.riscv.vmadc.nxv16i16.nxv16i16( %0, %1, i32 %2) @@ -216,7 +216,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.nxv32i16( +declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( , , i32); @@ -226,7 +226,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i16( + %a = call @llvm.riscv.vmadc.nxv32i16.nxv32i16( %0, %1, i32 %2) @@ -234,7 +234,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.nxv1i32( +declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( , , i32); @@ -244,7 +244,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i32( + %a = call @llvm.riscv.vmadc.nxv1i32.nxv1i32( %0, %1, i32 %2) @@ -252,7 +252,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i32( +declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( , , i32); @@ -262,7 +262,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i32( + %a = call @llvm.riscv.vmadc.nxv2i32.nxv2i32( %0, %1, i32 %2) @@ -270,7 +270,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i32( +declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( , , i32); @@ -280,7 +280,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i32( + %a = call @llvm.riscv.vmadc.nxv4i32.nxv4i32( %0, %1, i32 %2) @@ -288,7 +288,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i32( +declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( , , i32); @@ -298,7 +298,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i32( + %a = call @llvm.riscv.vmadc.nxv8i32.nxv8i32( %0, %1, i32 %2) @@ -306,7 +306,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i32( +declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( , , i32); @@ -316,7 +316,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i32( + %a = call @llvm.riscv.vmadc.nxv16i32.nxv16i32( %0, %1, i32 %2) @@ -324,7 +324,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i8( +declare @llvm.riscv.vmadc.nxv1i8.i8( , i8, i32); @@ -334,7 +334,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 %1, i32 %2) @@ -342,7 +342,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i8( +declare @llvm.riscv.vmadc.nxv2i8.i8( , i8, i32); @@ -352,7 +352,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %a = call @llvm.riscv.vmadc.nxv2i8.i8( %0, i8 %1, i32 %2) @@ -360,7 +360,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i8( +declare @llvm.riscv.vmadc.nxv4i8.i8( , i8, i32); @@ -370,7 +370,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 %1, i32 %2) @@ -378,7 +378,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i8( +declare @llvm.riscv.vmadc.nxv8i8.i8( , i8, i32); @@ -388,7 +388,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 %1, i32 %2) @@ -396,7 +396,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i8( +declare @llvm.riscv.vmadc.nxv16i8.i8( , i8, i32); @@ -406,7 +406,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 %1, i32 %2) @@ -414,7 +414,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.i8( +declare @llvm.riscv.vmadc.nxv32i8.i8( , i8, i32); @@ -424,7 +424,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 %1, i32 %2) @@ -432,7 +432,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i1.i8( +declare @llvm.riscv.vmadc.nxv64i8.i8( , i8, i32); @@ -442,7 +442,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 %1, i32 %2) @@ -450,7 +450,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i16( +declare @llvm.riscv.vmadc.nxv1i16.i16( , i16, i32); @@ -460,7 +460,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 %1, i32 %2) @@ -468,7 +468,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i16( +declare @llvm.riscv.vmadc.nxv2i16.i16( , i16, i32); @@ -478,7 +478,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 %1, i32 %2) @@ -486,7 +486,7 @@ entry: ret %a } -declare 
@llvm.riscv.vmadc.nxv4i1.i16( +declare @llvm.riscv.vmadc.nxv4i16.i16( , i16, i32); @@ -496,7 +496,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i16( + %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 %1, i32 %2) @@ -504,7 +504,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i16( +declare @llvm.riscv.vmadc.nxv8i16.i16( , i16, i32); @@ -514,7 +514,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 %1, i32 %2) @@ -522,7 +522,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i16( +declare @llvm.riscv.vmadc.nxv16i16.i16( , i16, i32); @@ -532,7 +532,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 %1, i32 %2) @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.i16( +declare @llvm.riscv.vmadc.nxv32i16.i16( , i16, i32); @@ -550,7 +550,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 %1, i32 %2) @@ -558,7 +558,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i32( +declare @llvm.riscv.vmadc.nxv1i32.i32( , i32, i32); @@ -568,7 +568,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 %1, i32 %2) @@ -576,7 +576,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i32( +declare @llvm.riscv.vmadc.nxv2i32.i32( , i32, i32); @@ -586,7 +586,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i32( + %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 %1, i32 %2) @@ -594,7 +594,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i32( +declare @llvm.riscv.vmadc.nxv4i32.i32( , i32, i32); @@ -604,7 +604,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 %1, i32 %2) @@ -612,7 +612,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i32( +declare @llvm.riscv.vmadc.nxv8i32.i32( , i32, i32); @@ -622,7 +622,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 %1, i32 %2) @@ -630,7 +630,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i32( +declare @llvm.riscv.vmadc.nxv16i32.i32( , i32, i32); @@ -640,7 +640,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32 ; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 %1, i32 %2) @@ -653,7 +653,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 9, i32 %1) @@ -666,7 +666,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %a = call @llvm.riscv.vmadc.nxv2i8.i8( %0, i8 -9, i32 %1) @@ -679,7 +679,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 9, i32 %1) @@ -692,7 +692,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 -9, i32 %1) @@ -705,7 +705,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 9, i32 %1) @@ -718,7 +718,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 -9, i32 %1) @@ -731,7 +731,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 9, i32 %1) @@ -744,7 +744,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 -9, i32 %1) @@ -757,7 +757,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 9, i32 %1) @@ -770,7 +770,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv4i1.i16( + %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 -9, i32 %1) @@ -783,7 +783,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 9, i32 %1) @@ -796,7 +796,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 -9, 
i32 %1) @@ -809,7 +809,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 9, i32 %1) @@ -822,7 +822,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 -9, i32 %1) @@ -835,7 +835,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv2i1.i32( + %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 9, i32 %1) @@ -848,7 +848,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 -9, i32 %1) @@ -861,7 +861,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 9, i32 %1) @@ -874,7 +874,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 -9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll index 1b6c8eb93ea37..71958e41b06d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmadc.nxv1i1.nxv1i8( +declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( , , i64); @@ -10,7 +10,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmadc.nxv1i8.nxv1i8( %0, %1, i64 %2) @@ -18,7 +18,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i8( +declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( , , i64); @@ -28,7 +28,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmadc.nxv2i8.nxv2i8( %0, %1, i64 %2) @@ -36,7 +36,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i8( +declare @llvm.riscv.vmadc.nxv4i8.nxv4i8( , , i64); @@ -46,7 +46,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i8( + %a = call @llvm.riscv.vmadc.nxv4i8.nxv4i8( %0, %1, i64 %2) @@ -54,7 +54,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i8( +declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( , , i64); @@ -64,7 +64,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i8( + %a = call @llvm.riscv.vmadc.nxv8i8.nxv8i8( %0, %1, i64 %2) @@ -72,7 +72,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i8( +declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( , , i64); @@ -82,7 +82,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i8( + %a = call @llvm.riscv.vmadc.nxv16i8.nxv16i8( %0, %1, i64 %2) @@ -90,7 +90,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.nxv32i8( +declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( , , i64); @@ -100,7 +100,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i8( + %a = call @llvm.riscv.vmadc.nxv32i8.nxv32i8( %0, %1, i64 %2) @@ -108,7 +108,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i1.nxv64i8( +declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( , , i64); @@ -118,7 +118,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv64i1.nxv64i8( + %a = call @llvm.riscv.vmadc.nxv64i8.nxv64i8( %0, %1, i64 %2) @@ -126,7 +126,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.nxv1i16( +declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( , , i64); @@ -136,7 +136,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i16( + %a = call @llvm.riscv.vmadc.nxv1i16.nxv1i16( %0, %1, i64 %2) @@ -144,7 +144,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i16( +declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( , , i64); @@ -154,7 +154,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i16( + %a = call @llvm.riscv.vmadc.nxv2i16.nxv2i16( %0, %1, i64 %2) @@ -162,7 +162,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i16( +declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( , , i64); @@ -172,7 +172,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i16( + %a = call @llvm.riscv.vmadc.nxv4i16.nxv4i16( %0, %1, i64 %2) @@ -180,7 +180,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i16( +declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( , , i64); @@ -190,7 +190,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i16( + %a = call @llvm.riscv.vmadc.nxv8i16.nxv8i16( %0, %1, i64 %2) @@ -198,7 +198,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i16( +declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( , , i64); @@ -208,7 +208,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - 
%a = call @llvm.riscv.vmadc.nxv16i1.nxv16i16( + %a = call @llvm.riscv.vmadc.nxv16i16.nxv16i16( %0, %1, i64 %2) @@ -216,7 +216,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.nxv32i16( +declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( , , i64); @@ -226,7 +226,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i16( + %a = call @llvm.riscv.vmadc.nxv32i16.nxv32i16( %0, %1, i64 %2) @@ -234,7 +234,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.nxv1i32( +declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( , , i64); @@ -244,7 +244,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i32( + %a = call @llvm.riscv.vmadc.nxv1i32.nxv1i32( %0, %1, i64 %2) @@ -252,7 +252,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i32( +declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( , , i64); @@ -262,7 +262,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i32( + %a = call @llvm.riscv.vmadc.nxv2i32.nxv2i32( %0, %1, i64 %2) @@ -270,7 +270,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i32( +declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( , , i64); @@ -280,7 +280,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i32( + %a = call @llvm.riscv.vmadc.nxv4i32.nxv4i32( %0, %1, i64 %2) @@ -288,7 +288,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i32( +declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( , , i64); @@ -298,7 +298,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i32( + %a = call @llvm.riscv.vmadc.nxv8i32.nxv8i32( %0, %1, i64 %2) @@ -306,7 +306,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.nxv16i32( +declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( , , i64); @@ -316,7 +316,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i32( + %a = call @llvm.riscv.vmadc.nxv16i32.nxv16i32( %0, %1, i64 %2) @@ -324,7 +324,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.nxv1i64( +declare @llvm.riscv.vmadc.nxv1i64.nxv1i64( , , i64); @@ -334,7 +334,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i64( + %a = call @llvm.riscv.vmadc.nxv1i64.nxv1i64( %0, %1, i64 %2) @@ -342,7 +342,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.nxv2i64( +declare @llvm.riscv.vmadc.nxv2i64.nxv2i64( , , i64); @@ -352,7 +352,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i64( + %a = call 
@llvm.riscv.vmadc.nxv2i64.nxv2i64( %0, %1, i64 %2) @@ -360,7 +360,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.nxv4i64( +declare @llvm.riscv.vmadc.nxv4i64.nxv4i64( , , i64); @@ -370,7 +370,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i64( + %a = call @llvm.riscv.vmadc.nxv4i64.nxv4i64( %0, %1, i64 %2) @@ -378,7 +378,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.nxv8i64( +declare @llvm.riscv.vmadc.nxv8i64.nxv8i64( , , i64); @@ -388,7 +388,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i64( + %a = call @llvm.riscv.vmadc.nxv8i64.nxv8i64( %0, %1, i64 %2) @@ -396,7 +396,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i8( +declare @llvm.riscv.vmadc.nxv1i8.i8( , i8, i64); @@ -406,7 +406,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 %1, i64 %2) @@ -414,7 +414,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i8( +declare @llvm.riscv.vmadc.nxv2i8.i8( , i8, i64); @@ -424,7 +424,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %a = call @llvm.riscv.vmadc.nxv2i8.i8( %0, i8 %1, i64 %2) @@ -432,7 +432,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i8( +declare @llvm.riscv.vmadc.nxv4i8.i8( , i8, i64); @@ -442,7 +442,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 %1, i64 %2) @@ -450,7 +450,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i8( +declare @llvm.riscv.vmadc.nxv8i8.i8( , i8, i64); @@ -460,7 +460,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 %1, i64 %2) @@ -468,7 +468,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i8( +declare @llvm.riscv.vmadc.nxv16i8.i8( , i8, i64); @@ -478,7 +478,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 %1, i64 %2) @@ -486,7 +486,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.i8( +declare @llvm.riscv.vmadc.nxv32i8.i8( , i8, i64); @@ -496,7 +496,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 %1, i64 %2) @@ -504,7 +504,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i1.i8( +declare @llvm.riscv.vmadc.nxv64i8.i8( , i8, i64); @@ -514,7 +514,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 %1, i64 %2) @@ -522,7 +522,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i16( +declare @llvm.riscv.vmadc.nxv1i16.i16( , i16, i64); @@ -532,7 +532,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 %1, i64 %2) @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i16( +declare @llvm.riscv.vmadc.nxv2i16.i16( , i16, i64); @@ -550,7 +550,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 %1, i64 %2) @@ -558,7 +558,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i16( +declare @llvm.riscv.vmadc.nxv4i16.i16( , i16, i64); @@ -568,7 +568,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i16( + %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 %1, i64 %2) @@ -576,7 +576,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i16( +declare @llvm.riscv.vmadc.nxv8i16.i16( , i16, i64); @@ -586,7 +586,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 %1, i64 %2) @@ -594,7 +594,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i16( +declare @llvm.riscv.vmadc.nxv16i16.i16( , i16, i64); @@ -604,7 +604,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 %1, i64 %2) @@ -612,7 +612,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i1.i16( +declare @llvm.riscv.vmadc.nxv32i16.i16( , i16, i64); @@ -622,7 +622,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 %1, i64 %2) @@ -630,7 +630,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i32( +declare @llvm.riscv.vmadc.nxv1i32.i32( , i32, i64); @@ -640,7 +640,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 %1, i64 %2) @@ -648,7 +648,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i32( +declare @llvm.riscv.vmadc.nxv2i32.i32( , i32, i64); @@ -658,7 +658,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call 
@llvm.riscv.vmadc.nxv2i1.i32( + %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 %1, i64 %2) @@ -666,7 +666,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i32( +declare @llvm.riscv.vmadc.nxv4i32.i32( , i32, i64); @@ -676,7 +676,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 %1, i64 %2) @@ -684,7 +684,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i32( +declare @llvm.riscv.vmadc.nxv8i32.i32( , i32, i64); @@ -694,7 +694,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 %1, i64 %2) @@ -702,7 +702,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i1.i32( +declare @llvm.riscv.vmadc.nxv16i32.i32( , i32, i64); @@ -712,7 +712,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 %1, i64 %2) @@ -720,7 +720,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i1.i64( +declare @llvm.riscv.vmadc.nxv1i64.i64( , i64, i64); @@ -730,7 +730,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv1i1.i64( + %a = call @llvm.riscv.vmadc.nxv1i64.i64( %0, i64 %1, i64 %2) @@ -738,7 +738,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i1.i64( +declare @llvm.riscv.vmadc.nxv2i64.i64( , i64, i64); @@ -748,7 +748,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv2i1.i64( + %a = call @llvm.riscv.vmadc.nxv2i64.i64( %0, i64 %1, i64 %2) @@ -756,7 +756,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i1.i64( +declare @llvm.riscv.vmadc.nxv4i64.i64( , i64, i64); @@ -766,7 +766,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv4i1.i64( + %a = call @llvm.riscv.vmadc.nxv4i64.i64( %0, i64 %1, i64 %2) @@ -774,7 +774,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i1.i64( +declare @llvm.riscv.vmadc.nxv8i64.i64( , i64, i64); @@ -784,7 +784,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmadc.nxv8i1.i64( + %a = call @llvm.riscv.vmadc.nxv8i64.i64( %0, i64 %1, i64 %2) @@ -797,7 +797,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 9, i64 %1) @@ -810,7 +810,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %a = call 
@llvm.riscv.vmadc.nxv2i8.i8( %0, i8 -9, i64 %1) @@ -823,7 +823,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 9, i64 %1) @@ -836,7 +836,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 -9, i64 %1) @@ -849,7 +849,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 9, i64 %1) @@ -862,7 +862,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 -9, i64 %1) @@ -875,7 +875,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 9, i64 %1) @@ -888,7 +888,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 -9, i64 %1) @@ -901,7 +901,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 9, i64 %1) @@ -914,7 +914,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv4i1.i16( + %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 -9, i64 %1) @@ -927,7 +927,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 9, i64 %1) @@ -940,7 +940,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 -9, i64 %1) @@ -953,7 +953,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 9, i64 %1) @@ -966,7 +966,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 -9, i64 %1) @@ -979,7 +979,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; 
CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv2i1.i32( + %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 9, i64 %1) @@ -992,7 +992,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 -9, i64 %1) @@ -1005,7 +1005,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 9, i64 %1) @@ -1018,7 +1018,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 -9, i64 %1) @@ -1031,7 +1031,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv1i1.i64( + %a = call @llvm.riscv.vmadc.nxv1i64.i64( %0, i64 9, i64 %1) @@ -1044,7 +1044,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv2i1.i64( + %a = call @llvm.riscv.vmadc.nxv2i64.i64( %0, i64 -9, i64 %1) @@ -1057,7 +1057,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmadc.nxv4i1.i64( + %a = call @llvm.riscv.vmadc.nxv4i64.i64( %0, i64 9, i64 %1) @@ -1070,7 +1070,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 - %a = call @llvm.riscv.vmadc.nxv8i1.i64( + %a = call @llvm.riscv.vmadc.nxv8i64.i64( %0, i64 -9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll index 3dbdf047b7e76..a59cfbf669f0b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( +declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( , , , @@ -11,7 +11,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( %0, %1, %2, @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( +declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( , , , @@ -31,7 +31,7 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( %0, %1, %2, @@ -40,7 +40,7 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8( +declare 
+declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
   , , ,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     %0, %1, %2,
@@ -60,7 +60,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
   , , ,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     %0, %1, %2,
@@ -80,7 +80,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
   , , ,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     %0, %1, %2,
@@ -100,7 +100,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
   , , ,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     %0, %1, %2,
@@ -120,7 +120,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
   , , ,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     %0, %1, %2,
@@ -140,7 +140,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
   , , ,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     %0, %1, %2,
@@ -160,7 +160,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
   , , ,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     %0, %1, %2,
@@ -180,7 +180,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
   , , ,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     %0, %1, %2,
@@ -200,7 +200,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
   , , ,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     %0, %1, %2,
@@ -220,7 +220,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
   , , ,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     %0, %1, %2,
@@ -240,7 +240,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
   , , ,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     %0, %1, %2,
@@ -260,7 +260,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
   , , ,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     %0, %1, %2,
@@ -280,7 +280,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
   , , ,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     %0, %1, %2,
@@ -300,7 +300,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
   , , ,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     %0, %1, %2,
@@ -320,7 +320,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
   , , ,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     %0, %1, %2,
@@ -340,7 +340,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
   , , ,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     %0, %1, %2,
@@ -360,7 +360,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
   , i8, ,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     %0, i8 %1, %2,
@@ -380,7 +380,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
   , i8, ,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     %0, i8 %1, %2,
@@ -400,7 +400,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
   , i8, ,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     %0, i8 %1, %2,
@@ -420,7 +420,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
   , i8, ,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     %0, i8 %1, %2,
@@ -440,7 +440,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
   , i8, ,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     %0, i8 %1, %2,
@@ -460,7 +460,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
   , i8, ,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     %0, i8 %1, %2,
@@ -480,7 +480,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
   , i8, ,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     %0, i8 %1, %2,
@@ -500,7 +500,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
   , i16, ,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     %0, i16 %1, %2,
@@ -520,7 +520,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
   , i16, ,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     %0, i16 %1, %2,
@@ -540,7 +540,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
   , i16, ,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     %0, i16 %1, %2,
@@ -560,7 +560,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
   , i16, ,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     %0, i16 %1, %2,
@@ -580,7 +580,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
   , i16, ,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     %0, i16 %1, %2,
@@ -600,7 +600,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
   , i16, ,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     %0, i16 %1, %2,
@@ -620,7 +620,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
   , i32, ,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     %0, i32 %1, %2,
@@ -640,7 +640,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
   , i32, ,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     %0, i32 %1, %2,
@@ -660,7 +660,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
   , i32, ,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     %0, i32 %1, %2,
@@ -680,7 +680,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
   , i32, ,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     %0, i32 %1, %2,
@@ -700,7 +700,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
   , i32, ,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     %0, i32 %1, %2,
@@ -725,7 +725,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     %0, i8 9, %1,
@@ -738,10 +738,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     %0,
-    i8 -9,
+    i8 9,
     %1, i32 %2)
@@ -753,7 +753,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     %0, i8 9, %1,
@@ -766,10 +766,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     %0,
-    i8 -9,
+    i8 9,
     %1, i32 %2)
@@ -781,7 +781,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     %0, i8 9, %1,
@@ -794,10 +794,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     %0,
-    i8 -9,
+    i8 9,
     %1, i32 %2)
@@ -809,7 +809,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     %0, i8 9, %1,
@@ -822,10 +822,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     %0,
-    i16 -9,
+    i16 9,
     %1, i32 %2)
@@ -837,7 +837,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     %0, i16 9, %1,
@@ -850,10 +850,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     %0,
-    i16 -9,
+    i16 9,
     %1, i32 %2)
@@ -865,7 +865,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     %0, i16 9, %1,
@@ -878,10 +878,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     %0,
-    i16 -9,
+    i16 9,
     %1, i32 %2)
@@ -893,7 +893,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     %0, i16 9, %1,
@@ -906,10 +906,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     %0,
-    i32 -9,
+    i32 9,
     %1, i32 %2)
@@ -921,7 +921,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     %0, i32 9, %1,
@@ -934,10 +934,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     %0,
-    i32 -9,
+    i32 9,
     %1, i32 %2)
@@ -949,7 +949,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     %0, i32 9, %1,
@@ -962,10 +962,10 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     %0,
-    i32 -9,
+    i32 9,
     %1, i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
index b94a2b9f4d986..7ba10e48ea49c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   , , ,
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     %0, %1, %2,
@@ -20,7 +20,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
   , , ,
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     %0, %1, %2,
@@ -40,7 +40,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
   , , ,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     %0, %1, %2,
@@ -60,7 +60,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
   , , ,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     %0, %1, %2,
@@ -80,7 +80,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
   , , ,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     %0, %1, %2,
@@ -100,7 +100,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
   , , ,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     %0, %1, %2,
@@ -120,7 +120,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
   , , ,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     %0, %1, %2,
@@ -140,7 +140,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
   , , ,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     %0, %1, %2,
@@ -160,7 +160,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
   , , ,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     %0, %1, %2,
@@ -180,7 +180,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
   , , ,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     %0, %1, %2,
@@ -200,7 +200,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
   , , ,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     %0, %1, %2,
@@ -220,7 +220,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
   , , ,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     %0, %1, %2,
@@ -240,7 +240,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
   , , ,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     %0, %1, %2,
@@ -260,7 +260,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
   , , ,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     %0, %1, %2,
@@ -280,7 +280,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
   , , ,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     %0, %1, %2,
@@ -300,7 +300,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
   , , ,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     %0, %1, %2,
@@ -320,7 +320,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
   , , ,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     %0, %1, %2,
@@ -340,7 +340,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
   , , ,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     %0, %1, %2,
@@ -360,7 +360,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+declare @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
   , , ,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     %0, %1, %2,
@@ -380,7 +380,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+declare @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
   , , ,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     %0, %1, %2,
@@ -400,7 +400,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+declare @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
   , , ,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     %0, %1, %2,
@@ -420,7 +420,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+declare @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
   , , ,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     %0, %1, %2,
@@ -440,7 +440,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
   , i8, ,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     %0, i8 %1, %2,
@@ -460,7 +460,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
   , i8, ,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     %0, i8 %1, %2,
@@ -480,7 +480,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
   , i8, ,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     %0, i8 %1, %2,
@@ -500,7 +500,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
   , i8, ,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     %0, i8 %1, %2,
@@ -520,7 +520,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
   , i8, ,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     %0, i8 %1, %2,
@@ -540,7 +540,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
   , i8, ,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     %0, i8 %1, %2,
@@ -560,7 +560,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
   , i8, ,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     %0, i8 %1, %2,
@@ -580,7 +580,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
   , i16, ,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     %0, i16 %1,
     %2,
@@ -600,7 +600,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
   , i16, ,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     %0, i16 %1, %2,
@@ -620,7 +620,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
   , i16, ,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     %0, i16 %1, %2,
@@ -640,7 +640,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
   , i16, ,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     %0, i16 %1, %2,
@@ -660,7 +660,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
   , i16, ,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     %0, i16 %1, %2,
@@ -680,7 +680,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
   , i16, ,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     %0, i16 %1, %2,
@@ -700,7 +700,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
   , i32, ,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     %0, i32 %1, %2,
@@ -720,7 +720,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
   , i32, ,
@@ -731,7 +731,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     %0, i32 %1, %2,
@@ -740,7 +740,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
   , i32, ,
@@ -751,7 +751,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     %0, i32 %1, %2,
@@ -760,7 +760,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
   , i32, ,
@@ -771,7 +771,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     %0, i32 %1, %2,
@@ -780,7 +780,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
   , i32, ,
@@ -791,7 +791,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     %0, i32 %1, %2,
@@ -800,7 +800,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+declare @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
   , i64, ,
@@ -811,7 +811,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     %0, i64 %1, %2,
@@ -820,7 +820,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+declare @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
   , i64, ,
@@ -831,7 +831,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     %0, i64 %1, %2,
@@ -840,7 +840,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+declare @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
   , i64, ,
@@ -851,7 +851,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     %0, i64 %1, %2,
@@ -860,7 +860,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+declare @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
   , i64, ,
@@ -871,7 +871,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     %0, i64 %1, %2,
@@ -885,7 +885,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     %0, i8 9, %1,
@@ -899,7 +899,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     %0,
     i8 9, %1,
@@ -913,7 +913,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     %0, i8 9, %1,
@@ -927,7 +927,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     %0, i8 9, %1,
@@ -941,7 +941,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     %0, i8 9, %1,
@@ -955,7 +955,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     %0, i8 9, %1,
@@ -969,7 +969,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     %0, i8 9, %1,
@@ -983,7 +983,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     %0, i16 9, %1,
@@ -997,7 +997,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     %0, i16 9, %1,
@@ -1011,7 +1011,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     %0, i16 9, %1,
@@ -1025,7 +1025,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     %0, i16 9, %1,
@@ -1039,7 +1039,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     %0, i16 9, %1,
@@ -1053,7 +1053,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     %0, i16 9, %1,
@@ -1067,7 +1067,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     %0, i32 9, %1,
@@ -1081,7 +1081,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     %0, i32 9, %1,
@@ -1095,7 +1095,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     %0, i32 9, %1,
@@ -1109,7 +1109,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     %0, i32 9, %1,
@@ -1123,7 +1123,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     %0, i32 9, %1,
@@ -1137,7 +1137,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     %0, i64 9, %1,
@@ -1151,7 +1151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     %0, i64 9, %1,
@@ -1165,7 +1165,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     %0, i64 9, %1,
@@ -1179,7 +1179,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+  %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     %0, i64 9, %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
index 4e8eabcf7ea38..213c69445fab4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   , ,
   i32);
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+  %a = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     %0, %1, i32 %2)
@@ -18,7 +18,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
   , ,
   i32);
@@ -28,7 +28,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+  %a = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     %0, %1, i32 %2)
@@ -36,7 +36,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
   , ,
   i32);
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+  %a = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     %0, %1, i32 %2)
@@ -54,7 +54,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
   , ,
   i32);
@@ -64,7 +64,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+  %a = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     %0, %1, i32 %2)
@@ -72,7 +72,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
   , ,
   i32);
@@ -82,7 +82,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+  %a = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     %0, %1, i32 %2)
@@ -90,7 +90,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
   , ,
   i32);
@@ -100,7 +100,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+  %a = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     %0, %1, i32 %2)
@@ -108,7 +108,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
   , ,
   i32);
@@ -118,7 +118,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+  %a = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     %0, %1, i32 %2)
@@ -126,7 +126,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
   , ,
   i32);
@@ -136,7 +136,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+  %a = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     %0, %1, i32 %2)
@@ -144,7 +144,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
   , ,
   i32);
@@ -154,7 +154,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+  %a = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     %0, %1, i32 %2)
@@ -162,7 +162,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
   , ,
   i32);
@@ -172,7 +172,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+  %a = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     %0, %1, i32 %2)
@@ -180,7 +180,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
   , ,
   i32);
@@ -190,7 +190,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+  %a = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     %0, %1, i32 %2)
@@ -198,7 +198,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
   , ,
   i32);
@@ -208,7 +208,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+  %a = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     %0, %1, i32 %2)
@@ -216,7 +216,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
   , ,
   i32);
@@ -226,7 +226,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+  %a = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     %0, %1, i32 %2)
@@ -234,7 +234,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
   , ,
   i32);
@@ -244,7 +244,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+  %a = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     %0, %1, i32 %2)
@@ -252,7 +252,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+declare @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
   , ,
   i32);
@@ -262,7 +262,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+  %a = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     %0, %1, i32 %2)
@@ -270,7 +270,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
   , ,
   i32);
@@ -280,7 +280,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+  %a = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     %0, %1, i32 %2)
@@ -288,7 +288,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
   , ,
   i32);
@@ -298,7 +298,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+  %a = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     %0, %1, i32 %2)
@@ -306,7 +306,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
   , ,
   i32);
@@ -316,7 +316,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+  %a = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     %0, %1, i32 %2)
@@ -324,7 +324,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv1i1.i8(
+declare @llvm.riscv.vmsbc.nxv1i8.i8(
   , i8,
   i32);
@@ -334,7 +334,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv1i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv1i8.i8(
     %0, i8 %1, i32 %2)
@@ -342,7 +342,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv2i1.i8(
+declare @llvm.riscv.vmsbc.nxv2i8.i8(
   , i8,
   i32);
@@ -352,7 +352,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv2i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv2i8.i8(
     %0, i8 %1, i32 %2)
@@ -360,7 +360,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv4i1.i8(
+declare @llvm.riscv.vmsbc.nxv4i8.i8(
   , i8,
   i32);
@@ -370,7 +370,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv4i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv4i8.i8(
     %0, i8 %1, i32 %2)
@@ -378,7 +378,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv8i1.i8(
+declare @llvm.riscv.vmsbc.nxv8i8.i8(
   , i8,
   i32);
@@ -388,7 +388,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv8i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv8i8.i8(
     %0, i8 %1, i32 %2)
@@ -396,7 +396,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv16i1.i8(
+declare @llvm.riscv.vmsbc.nxv16i8.i8(
   , i8,
   i32);
@@ -406,7 +406,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv16i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv16i8.i8(
     %0, i8 %1, i32 %2)
@@ -414,7 +414,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv32i1.i8(
+declare @llvm.riscv.vmsbc.nxv32i8.i8(
   , i8,
   i32);
@@ -424,7 +424,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv32i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv32i8.i8(
     %0, i8 %1, i32 %2)
@@ -432,7 +432,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv64i1.i8(
+declare @llvm.riscv.vmsbc.nxv64i8.i8(
   , i8,
   i32);
@@ -442,7 +442,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv64i1.i8(
+  %a = call @llvm.riscv.vmsbc.nxv64i8.i8(
     %0, i8 %1, i32 %2)
@@ -450,7 +450,7 @@ entry:
   ret %a
 }
-declare @llvm.riscv.vmsbc.nxv1i1.i16(
+declare @llvm.riscv.vmsbc.nxv1i16.i16(
   , i16,
   i32);
@@ -460,7 +460,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call @llvm.riscv.vmsbc.nxv1i1.i16(
+  %a = call @llvm.riscv.vmsbc.nxv1i16.i16(
     %0, i16 %1, i32 %2)
@@ -468,7 +468,7 @@ entry:
   ret %a
 }
@llvm.riscv.vmsbc.nxv2i1.i16( +declare @llvm.riscv.vmsbc.nxv2i16.i16( , i16, i32); @@ -478,7 +478,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i16( + %a = call @llvm.riscv.vmsbc.nxv2i16.i16( %0, i16 %1, i32 %2) @@ -486,7 +486,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i16( +declare @llvm.riscv.vmsbc.nxv4i16.i16( , i16, i32); @@ -496,7 +496,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i16( + %a = call @llvm.riscv.vmsbc.nxv4i16.i16( %0, i16 %1, i32 %2) @@ -504,7 +504,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i16( +declare @llvm.riscv.vmsbc.nxv8i16.i16( , i16, i32); @@ -514,7 +514,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i16( + %a = call @llvm.riscv.vmsbc.nxv8i16.i16( %0, i16 %1, i32 %2) @@ -522,7 +522,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.i16( +declare @llvm.riscv.vmsbc.nxv16i16.i16( , i16, i32); @@ -532,7 +532,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.i16( + %a = call @llvm.riscv.vmsbc.nxv16i16.i16( %0, i16 %1, i32 %2) @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i1.i16( +declare @llvm.riscv.vmsbc.nxv32i16.i16( , i16, i32); @@ -550,7 +550,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv32i1.i16( + %a = call @llvm.riscv.vmsbc.nxv32i16.i16( %0, i16 %1, i32 %2) @@ -558,7 +558,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.i32( +declare @llvm.riscv.vmsbc.nxv1i32.i32( , i32, i32); @@ -568,7 +568,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.i32( + %a = call @llvm.riscv.vmsbc.nxv1i32.i32( %0, i32 %1, i32 %2) @@ -576,7 +576,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.i32( +declare @llvm.riscv.vmsbc.nxv2i32.i32( , i32, i32); @@ -586,7 +586,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i32( + %a = call @llvm.riscv.vmsbc.nxv2i32.i32( %0, i32 %1, i32 %2) @@ -594,7 +594,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i32( +declare @llvm.riscv.vmsbc.nxv4i32.i32( , i32, i32); @@ -604,7 +604,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i32( + %a = call @llvm.riscv.vmsbc.nxv4i32.i32( %0, i32 %1, i32 %2) @@ -612,7 +612,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i32( +declare @llvm.riscv.vmsbc.nxv8i32.i32( , i32, i32); @@ -622,7 +622,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i32( + %a = call @llvm.riscv.vmsbc.nxv8i32.i32( %0, i32 %1, i32 %2) @@ -630,7 +630,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.i32( +declare @llvm.riscv.vmsbc.nxv16i32.i32( , i32, i32); @@ -640,7 +640,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.i32( + %a = call @llvm.riscv.vmsbc.nxv16i32.i32( %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll index b28aaaef3b022..7f907923ff6f6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmsbc.nxv1i1.nxv1i8( +declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8( , , i64); @@ -10,7 +10,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8( %0, %1, i64 %2) @@ -18,7 +18,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.nxv2i8( +declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8( , , i64); @@ -28,7 +28,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8( %0, %1, i64 %2) @@ -36,7 +36,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.nxv4i8( +declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8( , , i64); @@ -46,7 +46,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i8( + %a = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8( %0, %1, i64 %2) @@ -54,7 +54,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.nxv8i8( +declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8( , , i64); @@ -64,7 +64,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i8( + %a = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8( %0, %1, i64 %2) @@ -72,7 +72,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.nxv16i8( +declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8( , , i64); @@ -82,7 +82,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i8( + %a = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8( %0, %1, i64 %2) @@ -90,7 +90,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i1.nxv32i8( +declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8( , , i64); @@ -100,7 +100,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i8( + %a = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8( %0, %1, i64 %2) @@ -108,7 +108,7 @@ entry: ret %a } -declare 
@llvm.riscv.vmsbc.nxv64i1.nxv64i8( +declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8( , , i64); @@ -118,7 +118,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv64i1.nxv64i8( + %a = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8( %0, %1, i64 %2) @@ -126,7 +126,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.nxv1i16( +declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16( , , i64); @@ -136,7 +136,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i16( + %a = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16( %0, %1, i64 %2) @@ -144,7 +144,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.nxv2i16( +declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16( , , i64); @@ -154,7 +154,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i16( + %a = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16( %0, %1, i64 %2) @@ -162,7 +162,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.nxv4i16( +declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16( , , i64); @@ -172,7 +172,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i16( + %a = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16( %0, %1, i64 %2) @@ -180,7 +180,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.nxv8i16( +declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16( , , i64); @@ -190,7 +190,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i16( + %a = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16( %0, %1, i64 %2) @@ -198,7 +198,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.nxv16i16( +declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16( , , i64); @@ -208,7 +208,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i16( + %a = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16( %0, %1, i64 %2) @@ -216,7 +216,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i1.nxv32i16( +declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16( , , i64); @@ -226,7 +226,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i16( + %a = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16( %0, %1, i64 %2) @@ -234,7 +234,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.nxv1i32( +declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32( , , i64); @@ -244,7 +244,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i32( + %a = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32( %0, %1, i64 %2) @@ -252,7 +252,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.nxv2i32( +declare 
@llvm.riscv.vmsbc.nxv2i32.nxv2i32( , , i64); @@ -262,7 +262,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i32( + %a = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32( %0, %1, i64 %2) @@ -270,7 +270,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.nxv4i32( +declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32( , , i64); @@ -280,7 +280,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i32( + %a = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32( %0, %1, i64 %2) @@ -288,7 +288,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.nxv8i32( +declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32( , , i64); @@ -298,7 +298,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i32( + %a = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32( %0, %1, i64 %2) @@ -306,7 +306,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.nxv16i32( +declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32( , , i64); @@ -316,7 +316,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i32( + %a = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32( %0, %1, i64 %2) @@ -324,7 +324,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.nxv1i64( +declare @llvm.riscv.vmsbc.nxv1i64.nxv1i64( , , i64); @@ -334,7 +334,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i64( + %a = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64( %0, %1, i64 %2) @@ -342,7 +342,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.nxv2i64( +declare @llvm.riscv.vmsbc.nxv2i64.nxv2i64( , , i64); @@ -352,7 +352,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i64( + %a = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64( %0, %1, i64 %2) @@ -360,7 +360,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.nxv4i64( +declare @llvm.riscv.vmsbc.nxv4i64.nxv4i64( , , i64); @@ -370,7 +370,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i64( + %a = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64( %0, %1, i64 %2) @@ -378,7 +378,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.nxv8i64( +declare @llvm.riscv.vmsbc.nxv8i64.nxv8i64( , , i64); @@ -388,7 +388,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i64( + %a = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64( %0, %1, i64 %2) @@ -396,7 +396,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.i8( +declare @llvm.riscv.vmsbc.nxv1i8.i8( , i8, i64); @@ -406,7 +406,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.i8( + %a = call @llvm.riscv.vmsbc.nxv1i8.i8( %0, i8 %1, i64 %2) @@ -414,7 +414,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.i8( +declare @llvm.riscv.vmsbc.nxv2i8.i8( , i8, i64); @@ -424,7 +424,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i8( + %a = call @llvm.riscv.vmsbc.nxv2i8.i8( %0, i8 %1, i64 %2) @@ -432,7 +432,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i8( +declare @llvm.riscv.vmsbc.nxv4i8.i8( , i8, i64); @@ -442,7 +442,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i8( + %a = call @llvm.riscv.vmsbc.nxv4i8.i8( %0, i8 %1, i64 %2) @@ -450,7 +450,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i8( +declare @llvm.riscv.vmsbc.nxv8i8.i8( , i8, i64); @@ -460,7 +460,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i8( + %a = call @llvm.riscv.vmsbc.nxv8i8.i8( %0, i8 %1, i64 %2) @@ -468,7 +468,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.i8( +declare @llvm.riscv.vmsbc.nxv16i8.i8( , i8, i64); @@ -478,7 +478,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.i8( + %a = call @llvm.riscv.vmsbc.nxv16i8.i8( %0, i8 %1, i64 %2) @@ -486,7 +486,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i1.i8( +declare @llvm.riscv.vmsbc.nxv32i8.i8( , i8, i64); @@ -496,7 +496,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv32i1.i8( + %a = call @llvm.riscv.vmsbc.nxv32i8.i8( %0, i8 %1, i64 %2) @@ -504,7 +504,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv64i1.i8( +declare @llvm.riscv.vmsbc.nxv64i8.i8( , i8, i64); @@ -514,7 +514,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv64i1.i8( + %a = call @llvm.riscv.vmsbc.nxv64i8.i8( %0, i8 %1, i64 %2) @@ -522,7 +522,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.i16( +declare @llvm.riscv.vmsbc.nxv1i16.i16( , i16, i64); @@ -532,7 +532,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.i16( + %a = call @llvm.riscv.vmsbc.nxv1i16.i16( %0, i16 %1, i64 %2) @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.i16( +declare @llvm.riscv.vmsbc.nxv2i16.i16( , i16, i64); @@ -550,7 +550,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i16( + %a = call @llvm.riscv.vmsbc.nxv2i16.i16( %0, i16 %1, 
i64 %2) @@ -558,7 +558,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i16( +declare @llvm.riscv.vmsbc.nxv4i16.i16( , i16, i64); @@ -568,7 +568,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i16( + %a = call @llvm.riscv.vmsbc.nxv4i16.i16( %0, i16 %1, i64 %2) @@ -576,7 +576,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i16( +declare @llvm.riscv.vmsbc.nxv8i16.i16( , i16, i64); @@ -586,7 +586,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i16( + %a = call @llvm.riscv.vmsbc.nxv8i16.i16( %0, i16 %1, i64 %2) @@ -594,7 +594,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.i16( +declare @llvm.riscv.vmsbc.nxv16i16.i16( , i16, i64); @@ -604,7 +604,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.i16( + %a = call @llvm.riscv.vmsbc.nxv16i16.i16( %0, i16 %1, i64 %2) @@ -612,7 +612,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i1.i16( +declare @llvm.riscv.vmsbc.nxv32i16.i16( , i16, i64); @@ -622,7 +622,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv32i1.i16( + %a = call @llvm.riscv.vmsbc.nxv32i16.i16( %0, i16 %1, i64 %2) @@ -630,7 +630,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.i32( +declare @llvm.riscv.vmsbc.nxv1i32.i32( , i32, i64); @@ -640,7 +640,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.i32( + %a = call @llvm.riscv.vmsbc.nxv1i32.i32( %0, i32 %1, i64 %2) @@ -648,7 +648,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.i32( +declare @llvm.riscv.vmsbc.nxv2i32.i32( , i32, i64); @@ -658,7 +658,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i32( + %a = call @llvm.riscv.vmsbc.nxv2i32.i32( %0, i32 %1, i64 %2) @@ -666,7 +666,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i32( +declare @llvm.riscv.vmsbc.nxv4i32.i32( , i32, i64); @@ -676,7 +676,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i32( + %a = call @llvm.riscv.vmsbc.nxv4i32.i32( %0, i32 %1, i64 %2) @@ -684,7 +684,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i32( +declare @llvm.riscv.vmsbc.nxv8i32.i32( , i32, i64); @@ -694,7 +694,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i32( + %a = call @llvm.riscv.vmsbc.nxv8i32.i32( %0, i32 %1, i64 %2) @@ -702,7 +702,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i1.i32( +declare @llvm.riscv.vmsbc.nxv16i32.i32( , i32, i64); @@ -712,7 +712,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv16i1.i32( + %a = call @llvm.riscv.vmsbc.nxv16i32.i32( %0, i32 %1, i64 %2) @@ -720,7 +720,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i1.i64( +declare @llvm.riscv.vmsbc.nxv1i64.i64( , i64, i64); @@ -730,7 +730,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv1i1.i64( + %a = call @llvm.riscv.vmsbc.nxv1i64.i64( %0, i64 %1, i64 %2) @@ -738,7 +738,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i1.i64( +declare @llvm.riscv.vmsbc.nxv2i64.i64( , i64, i64); @@ -748,7 +748,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv2i1.i64( + %a = call @llvm.riscv.vmsbc.nxv2i64.i64( %0, i64 %1, i64 %2) @@ -756,7 +756,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i1.i64( +declare @llvm.riscv.vmsbc.nxv4i64.i64( , i64, i64); @@ -766,7 +766,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv4i1.i64( + %a = call @llvm.riscv.vmsbc.nxv4i64.i64( %0, i64 %1, i64 %2) @@ -774,7 +774,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i1.i64( +declare @llvm.riscv.vmsbc.nxv8i64.i64( , i64, i64); @@ -784,7 +784,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} - %a = call @llvm.riscv.vmsbc.nxv8i1.i64( + %a = call @llvm.riscv.vmsbc.nxv8i64.i64( %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll index 6a8253ffc5045..09e8c90efbf29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( , , , @@ -11,7 +11,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( %0, %1, %2, @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( , , , @@ -31,7 +31,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( %0, %1, %2, @@ -40,7 +40,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( , , , @@ -51,7 +51,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: 
vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( %0, %1, %2, @@ -60,7 +60,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( , , , @@ -71,7 +71,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( %0, %1, %2, @@ -80,7 +80,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( , , , @@ -91,7 +91,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( %0, %1, %2, @@ -100,7 +100,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( , , , @@ -111,7 +111,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( %0, %1, %2, @@ -120,7 +120,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( , , , @@ -131,7 +131,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( %0, %1, %2, @@ -140,7 +140,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( , , , @@ -151,7 +151,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( %0, %1, %2, @@ -160,7 +160,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( , , , @@ -171,7 +171,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( %0, %1, %2, @@ -180,7 +180,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( , , , @@ -191,7 +191,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( %0, %1, %2, @@ -200,7 +200,7 @@ 
entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( , , , @@ -211,7 +211,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( %0, %1, %2, @@ -220,7 +220,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( , , , @@ -231,7 +231,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( %0, %1, %2, @@ -240,7 +240,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( , , , @@ -251,7 +251,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( %0, %1, %2, @@ -260,7 +260,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( , , , @@ -271,7 +271,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( %0, %1, %2, @@ -280,7 +280,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( , , , @@ -291,7 +291,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( %0, %1, %2, @@ -300,7 +300,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( , , , @@ -311,7 +311,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( %0, %1, %2, @@ -320,7 +320,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( , , , @@ -331,7 +331,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( %0, %1, %2, @@ -340,7 +340,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( , , , @@ -351,7 +351,7 @@ entry: ; 
CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( %0, %1, %2, @@ -360,7 +360,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( , i8, , @@ -371,7 +371,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( %0, i8 %1, %2, @@ -380,7 +380,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( , i8, , @@ -391,7 +391,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( %0, i8 %1, %2, @@ -400,7 +400,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( , i8, , @@ -411,7 +411,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( %0, i8 %1, %2, @@ -420,7 +420,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( , i8, , @@ -431,7 +431,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( %0, i8 %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( , i8, , @@ -451,7 +451,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( %0, i8 %1, %2, @@ -460,7 +460,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( , i8, , @@ -471,7 +471,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( %0, i8 %1, %2, @@ -480,7 +480,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( , i8, , @@ -491,7 +491,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( %0, i8 %1, %2, @@ -500,7 +500,7 @@ 
entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( , i16, , @@ -511,7 +511,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( %0, i16 %1, %2, @@ -520,7 +520,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( , i16, , @@ -531,7 +531,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( %0, i16 %1, %2, @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( , i16, , @@ -551,7 +551,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( %0, i16 %1, %2, @@ -560,7 +560,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( , i16, , @@ -571,7 +571,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( %0, i16 %1, %2, @@ -580,7 +580,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( , i16, , @@ -591,7 +591,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( %0, i16 %1, %2, @@ -600,7 +600,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( , i16, , @@ -611,7 +611,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( %0, i16 %1, %2, @@ -620,7 +620,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( , i32, , @@ -631,7 +631,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( %0, i32 %1, %2, @@ -640,7 +640,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( , i32, , @@ -651,7 +651,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( %0, i32 %1, %2, @@ -660,7 +660,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( , i32, , @@ -671,7 +671,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( %0, i32 %1, %2, @@ -680,7 +680,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( , i32, , @@ -691,7 +691,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( %0, i32 %1, %2, @@ -700,7 +700,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( , i32, , @@ -711,7 +711,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( %0, i32 %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll index 5b22e5977ffd1..901df3eea8eac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( , , , @@ -11,7 +11,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( %0, %1, %2, @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( , , , @@ -31,7 +31,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( %0, %1, %2, @@ -40,7 +40,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( , , , @@ -51,7 +51,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( %0, %1, %2, @@ -60,7 +60,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( , , , @@ -71,7 +71,7 @@ 
entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( %0, %1, %2, @@ -80,7 +80,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( , , , @@ -91,7 +91,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( %0, %1, %2, @@ -100,7 +100,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( , , , @@ -111,7 +111,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( %0, %1, %2, @@ -120,7 +120,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( , , , @@ -131,7 +131,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( %0, %1, %2, @@ -140,7 +140,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( , , , @@ -151,7 +151,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( %0, %1, %2, @@ -160,7 +160,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( , , , @@ -171,7 +171,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( %0, %1, %2, @@ -180,7 +180,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( , , , @@ -191,7 +191,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( %0, %1, %2, @@ -200,7 +200,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( , , , @@ -211,7 +211,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - 
%a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( %0, %1, %2, @@ -220,7 +220,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( , , , @@ -231,7 +231,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( %0, %1, %2, @@ -240,7 +240,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( , , , @@ -251,7 +251,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( %0, %1, %2, @@ -260,7 +260,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( , , , @@ -271,7 +271,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( %0, %1, %2, @@ -280,7 +280,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( , , , @@ -291,7 +291,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( %0, %1, %2, @@ -300,7 +300,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( , , , @@ -311,7 +311,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( %0, %1, %2, @@ -320,7 +320,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( , , , @@ -331,7 +331,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( %0, %1, %2, @@ -340,7 +340,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( , , , @@ -351,7 +351,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( %0, %1, %2, @@ -360,7 +360,7 @@ entry: ret 
%a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( , , , @@ -371,7 +371,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( %0, %1, %2, @@ -380,7 +380,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( , , , @@ -391,7 +391,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( %0, %1, %2, @@ -400,7 +400,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( , , , @@ -411,7 +411,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( %0, %1, %2, @@ -420,7 +420,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( , , , @@ -431,7 +431,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( , i8, , @@ -451,7 +451,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( %0, i8 %1, %2, @@ -460,7 +460,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( , i8, , @@ -471,7 +471,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( %0, i8 %1, %2, @@ -480,7 +480,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( , i8, , @@ -491,7 +491,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( %0, i8 %1, %2, @@ -500,7 +500,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( , i8, , @@ -511,7 +511,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: 
vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( %0, i8 %1, %2, @@ -520,7 +520,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( , i8, , @@ -531,7 +531,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( %0, i8 %1, %2, @@ -540,7 +540,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( , i8, , @@ -551,7 +551,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( %0, i8 %1, %2, @@ -560,7 +560,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( +declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( , i8, , @@ -571,7 +571,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( %0, i8 %1, %2, @@ -580,7 +580,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( , i16, , @@ -591,7 +591,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( %0, i16 %1, %2, @@ -600,7 +600,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( , i16, , @@ -611,7 +611,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( %0, i16 %1, %2, @@ -620,7 +620,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( , i16, , @@ -631,7 +631,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( %0, i16 %1, %2, @@ -640,7 +640,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( , i16, , @@ -651,7 +651,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( %0, i16 %1, %2, @@ -660,7 +660,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( +declare 
@llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( , i16, , @@ -671,7 +671,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( %0, i16 %1, %2, @@ -680,7 +680,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( +declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( , i16, , @@ -691,7 +691,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( %0, i16 %1, %2, @@ -700,7 +700,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( , i32, , @@ -711,7 +711,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( %0, i32 %1, %2, @@ -720,7 +720,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( , i32, , @@ -731,7 +731,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( %0, i32 %1, %2, @@ -740,7 +740,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( , i32, , @@ -751,7 +751,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( %0, i32 %1, %2, @@ -760,7 +760,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( , i32, , @@ -771,7 +771,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( %0, i32 %1, %2, @@ -780,7 +780,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( +declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( , i32, , @@ -791,7 +791,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( %0, i32 %1, %2, @@ -800,7 +800,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( , i64, , @@ -811,7 +811,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = 
call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( %0, i64 %1, %2, @@ -820,7 +820,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( , i64, , @@ -831,7 +831,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( %0, i64 %1, %2, @@ -840,7 +840,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( , i64, , @@ -851,7 +851,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( %0, i64 %1, %2, @@ -860,7 +860,7 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64( +declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( , i64, , @@ -871,7 +871,7 @@ entry: ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64( + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( %0, i64 %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll index d22ac605a20b6..ed540fad7c98f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll @@ -1,503 +1,503 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmv.v.x.nxv1i8.i8( +declare @llvm.riscv.vmv.v.x.nxv1i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv1i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i8.i8( +declare @llvm.riscv.vmv.v.x.nxv2i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv2i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i8.i8( +declare @llvm.riscv.vmv.v.x.nxv4i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv4i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i8.i8( +declare @llvm.riscv.vmv.v.x.nxv8i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i8(i8 
%0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8 ; CHECK: vsetvli {{.*}}, a1, e8,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv8i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i8.i8( +declare @llvm.riscv.vmv.v.x.nxv16i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8 ; CHECK: vsetvli {{.*}}, a1, e8,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv16i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i8.i8( +declare @llvm.riscv.vmv.v.x.nxv32i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8 ; CHECK: vsetvli {{.*}}, a1, e8,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv32i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv32i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv64i8.i8( +declare @llvm.riscv.vmv.v.x.nxv64i8( i8, i32); -define @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8 ; CHECK: vsetvli {{.*}}, a1, e8,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv64i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv64i8( i8 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i16.i16( +declare @llvm.riscv.vmv.v.x.nxv1i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16 ; CHECK: vsetvli {{.*}}, a1, e16,mf4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv1i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i16.i16( +declare @llvm.riscv.vmv.v.x.nxv2i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16 ; CHECK: vsetvli {{.*}}, a1, e16,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv2i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i16.i16( +declare @llvm.riscv.vmv.v.x.nxv4i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16 ; CHECK: vsetvli {{.*}}, a1, e16,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv4i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i16.i16( +declare @llvm.riscv.vmv.v.x.nxv8i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: 
intrinsic_vmv.v.x_x_nxv8i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16 ; CHECK: vsetvli {{.*}}, a1, e16,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv8i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i16.i16( +declare @llvm.riscv.vmv.v.x.nxv16i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16 ; CHECK: vsetvli {{.*}}, a1, e16,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv16i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i16.i16( +declare @llvm.riscv.vmv.v.x.nxv32i16( i16, i32); -define @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16 ; CHECK: vsetvli {{.*}}, a1, e16,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv32i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv32i16( i16 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i32.i32( +declare @llvm.riscv.vmv.v.x.nxv1i32( i32, i32); -define @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32 ; CHECK: vsetvli {{.*}}, a1, e32,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv1i32( i32 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i32.i32( +declare @llvm.riscv.vmv.v.x.nxv2i32( i32, i32); -define @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32 ; CHECK: vsetvli {{.*}}, a1, e32,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv2i32( i32 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i32.i32( +declare @llvm.riscv.vmv.v.x.nxv4i32( i32, i32); -define @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32 ; CHECK: vsetvli {{.*}}, a1, e32,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv4i32( i32 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i32.i32( +declare @llvm.riscv.vmv.v.x.nxv8i32( i32, i32); -define @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32 ; CHECK: vsetvli {{.*}}, a1, e32,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv8i32( i32 %0, i32 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i32.i32( +declare @llvm.riscv.vmv.v.x.nxv16i32( i32, i32); -define @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i32 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i32 %1) nounwind { entry: -; CHECK-LABEL: 
intrinsic_vmv.v.x_x_nxv16i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32 ; CHECK: vsetvli {{.*}}, a1, e32,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv16i32( i32 %0, i32 %1) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv1i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv2i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv4i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8 ; CHECK: vsetvli {{.*}}, a0, e8,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv8i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8 ; CHECK: vsetvli {{.*}}, a0, e8,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv16i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv32i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv32i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8 ; CHECK: vsetvli {{.*}}, a0, e8,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv32i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv32i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv64i8_i8(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv64i8(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8 ; CHECK: vsetvli {{.*}}, a0, e8,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv64i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv64i8( i8 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16 ; CHECK: vsetvli {{.*}}, a0, e16,mf4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv1i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16 +; CHECK-LABEL: 
intrinsic_vmv.v.x_i_nxv2i16 ; CHECK: vsetvli {{.*}}, a0, e16,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv2i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16 ; CHECK: vsetvli {{.*}}, a0, e16,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv4i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16 ; CHECK: vsetvli {{.*}}, a0, e16,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv8i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16 ; CHECK: vsetvli {{.*}}, a0, e16,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv16i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv32i16_i16(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv32i16(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16 ; CHECK: vsetvli {{.*}}, a0, e16,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv32i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv32i16( i16 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i32_i32(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i32(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32 ; CHECK: vsetvli {{.*}}, a0, e32,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv1i32( i32 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i32_i32(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i32(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32 ; CHECK: vsetvli {{.*}}, a0, e32,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv2i32( i32 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i32_i32(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i32(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32 ; CHECK: vsetvli {{.*}}, a0, e32,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv4i32( i32 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i32_i32(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i32(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32 ; CHECK: vsetvli {{.*}}, a0, e32,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv8i32( i32 9, i32 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i32_i32(i32 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i32(i32 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32 +; CHECK-LABEL: 
intrinsic_vmv.v.x_i_nxv16i32 ; CHECK: vsetvli {{.*}}, a0, e32,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv16i32( i32 9, i32 %0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll index 21b22f6c3f0fc..5b5e303c7b2b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll @@ -1,615 +1,615 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vmv.v.x.nxv1i8.i8( +declare @llvm.riscv.vmv.v.x.nxv1i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv1i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i8.i8( +declare @llvm.riscv.vmv.v.x.nxv2i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv2i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i8.i8( +declare @llvm.riscv.vmv.v.x.nxv4i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8 ; CHECK: vsetvli {{.*}}, a1, e8,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv4i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i8.i8( +declare @llvm.riscv.vmv.v.x.nxv8i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8 ; CHECK: vsetvli {{.*}}, a1, e8,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv8i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i8.i8( +declare @llvm.riscv.vmv.v.x.nxv16i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8 ; CHECK: vsetvli {{.*}}, a1, e8,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv16i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i8.i8( +declare @llvm.riscv.vmv.v.x.nxv32i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8 ; CHECK: vsetvli {{.*}}, a1, e8,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv32i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv32i8( i8 %0, i64 %1) ret %a } -declare 
@llvm.riscv.vmv.v.x.nxv64i8.i8( +declare @llvm.riscv.vmv.v.x.nxv64i8( i8, i64); -define @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8 ; CHECK: vsetvli {{.*}}, a1, e8,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv64i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv64i8( i8 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i16.i16( +declare @llvm.riscv.vmv.v.x.nxv1i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16 ; CHECK: vsetvli {{.*}}, a1, e16,mf4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv1i16( i16 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i16.i16( +declare @llvm.riscv.vmv.v.x.nxv2i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16 ; CHECK: vsetvli {{.*}}, a1, e16,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv2i16( i16 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i16.i16( +declare @llvm.riscv.vmv.v.x.nxv4i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16 ; CHECK: vsetvli {{.*}}, a1, e16,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv4i16( i16 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i16.i16( +declare @llvm.riscv.vmv.v.x.nxv8i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16 ; CHECK: vsetvli {{.*}}, a1, e16,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv8i16( i16 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i16.i16( +declare @llvm.riscv.vmv.v.x.nxv16i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16 ; CHECK: vsetvli {{.*}}, a1, e16,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv16i16( i16 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i16.i16( +declare @llvm.riscv.vmv.v.x.nxv32i16( i16, i64); -define @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16 ; CHECK: vsetvli {{.*}}, a1, e16,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv32i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv32i16( i16 %0, i64 %1) ret %a } -declare 
@llvm.riscv.vmv.v.x.nxv1i32.i32( +declare @llvm.riscv.vmv.v.x.nxv1i32( i32, i64); -define @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32 ; CHECK: vsetvli {{.*}}, a1, e32,mf2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv1i32( i32 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i32.i32( +declare @llvm.riscv.vmv.v.x.nxv2i32( i32, i64); -define @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32 ; CHECK: vsetvli {{.*}}, a1, e32,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv2i32( i32 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i32.i32( +declare @llvm.riscv.vmv.v.x.nxv4i32( i32, i64); -define @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32 ; CHECK: vsetvli {{.*}}, a1, e32,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv4i32( i32 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i32.i32( +declare @llvm.riscv.vmv.v.x.nxv8i32( i32, i64); -define @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32 ; CHECK: vsetvli {{.*}}, a1, e32,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv8i32( i32 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i32.i32( +declare @llvm.riscv.vmv.v.x.nxv16i32( i32, i64); -define @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32 ; CHECK: vsetvli {{.*}}, a1, e32,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv16i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv16i32( i32 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i64.i64( +declare @llvm.riscv.vmv.v.x.nxv1i64( i64, i64); -define @intrinsic_vmv.v.x_x_nxv1i64_i64(i64 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64 ; CHECK: vsetvli {{.*}}, a1, e64,m1 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv1i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv1i64( i64 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i64.i64( +declare @llvm.riscv.vmv.v.x.nxv2i64( i64, i64); -define @intrinsic_vmv.v.x_x_nxv2i64_i64(i64 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64 ; CHECK: vsetvli {{.*}}, a1, e64,m2 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv2i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv2i64( i64 %0, i64 %1) ret %a } -declare 
@llvm.riscv.vmv.v.x.nxv4i64.i64( +declare @llvm.riscv.vmv.v.x.nxv4i64( i64, i64); -define @intrinsic_vmv.v.x_x_nxv4i64_i64(i64 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64 ; CHECK: vsetvli {{.*}}, a1, e64,m4 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv4i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv4i64( i64 %0, i64 %1) ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i64.i64( +declare @llvm.riscv.vmv.v.x.nxv8i64( i64, i64); -define @intrinsic_vmv.v.x_x_nxv8i64_i64(i64 %0, i64 %1) nounwind { +define @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i64 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64 ; CHECK: vsetvli {{.*}}, a1, e64,m8 ; CHECK: vmv.v.x {{v[0-9]+}}, a0 - %a = call @llvm.riscv.vmv.v.x.nxv8i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv8i64( i64 %0, i64 %1) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv1i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv2i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8 ; CHECK: vsetvli {{.*}}, a0, e8,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv4i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8 ; CHECK: vsetvli {{.*}}, a0, e8,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv8i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8 ; CHECK: vsetvli {{.*}}, a0, e8,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv16i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv32i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv32i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8 ; CHECK: vsetvli {{.*}}, a0, e8,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv32i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv32i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv64i8_i8(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv64i8(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8 ; CHECK: vsetvli {{.*}}, a0, e8,m8 ; 
CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv64i8.i8( + %a = call @llvm.riscv.vmv.v.x.nxv64i8( i8 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16 ; CHECK: vsetvli {{.*}}, a0, e16,mf4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv1i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16 ; CHECK: vsetvli {{.*}}, a0, e16,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv2i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16 ; CHECK: vsetvli {{.*}}, a0, e16,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv4i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16 ; CHECK: vsetvli {{.*}}, a0, e16,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv8i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16 ; CHECK: vsetvli {{.*}}, a0, e16,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv16i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv32i16_i16(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv32i16(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16 ; CHECK: vsetvli {{.*}}, a0, e16,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv32i16.i16( + %a = call @llvm.riscv.vmv.v.x.nxv32i16( i16 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i32_i32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32 ; CHECK: vsetvli {{.*}}, a0, e32,mf2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv1i32( i32 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i32_i32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32 ; CHECK: vsetvli {{.*}}, a0, e32,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv2i32( i32 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i32_i32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32 ; CHECK: vsetvli {{.*}}, a0, e32,m2 ; 
CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv4i32( i32 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i32_i32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32 ; CHECK: vsetvli {{.*}}, a0, e32,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv8i32( i32 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv16i32_i32(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv16i32(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32 ; CHECK: vsetvli {{.*}}, a0, e32,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv16i32.i32( + %a = call @llvm.riscv.vmv.v.x.nxv16i32( i32 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv1i64_i64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv1i64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64 ; CHECK: vsetvli {{.*}}, a0, e64,m1 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv1i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv1i64( i64 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv2i64_i64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv2i64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64 ; CHECK: vsetvli {{.*}}, a0, e64,m2 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv2i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv2i64( i64 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv4i64_i64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv4i64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64 ; CHECK: vsetvli {{.*}}, a0, e64,m4 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv4i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv4i64( i64 9, i64 %0) ret %a } -define @intrinsic_vmv.v.x_i_nxv8i64_i64(i64 %0) nounwind { +define @intrinsic_vmv.v.x_i_nxv8i64(i64 %0) nounwind { entry: -; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_i64 +; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64 ; CHECK: vsetvli {{.*}}, a0, e64,m8 ; CHECK: vmv.v.i {{v[0-9]+}}, 9 - %a = call @llvm.riscv.vmv.v.x.nxv8i64.i64( + %a = call @llvm.riscv.vmv.v.x.nxv8i64( i64 9, i64 %0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll index a12745886ea12..b9e88f1673f97 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll @@ -1,32 +1,32 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s -declare i32 @llvm.riscv.vpopc.i64.nxv1i1( +declare i32 @llvm.riscv.vpopc.i32.nxv1i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv1i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv1i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv1i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv1i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv1i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv1i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv1i1( %0, %1, i32 
%2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv1i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv1i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1( %0, %1, i32 %2) @@ -34,33 +34,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv2i1( +declare i32 @llvm.riscv.vpopc.i32.nxv2i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv2i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv2i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv2i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv2i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv2i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv2i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv2i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv2i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv2i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1( %0, %1, i32 %2) @@ -68,33 +68,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv4i1( +declare i32 @llvm.riscv.vpopc.i32.nxv4i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv4i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv4i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv4i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv4i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv4i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv4i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv4i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv4i1 ; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv4i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1( %0, %1, i32 %2) @@ -102,33 +102,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv8i1( +declare i32 @llvm.riscv.vpopc.i32.nxv8i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv8i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv8i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv8i1 ; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv8i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv8i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv8i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv8i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv8i1 ; 
CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv8i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1( %0, %1, i32 %2) @@ -136,33 +136,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv16i1( +declare i32 @llvm.riscv.vpopc.i32.nxv16i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv16i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv16i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv16i1 ; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv16i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv16i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv16i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv16i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv16i1 ; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv16i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1( %0, %1, i32 %2) @@ -170,33 +170,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv32i1( +declare i32 @llvm.riscv.vpopc.i32.nxv32i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv32i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv32i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv32i1 ; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv32i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv32i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv32i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv32i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv32i1 ; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv32i1( + %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1( %0, %1, i32 %2) @@ -204,33 +204,33 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vpopc.i64.nxv64i1( +declare i32 @llvm.riscv.vpopc.i32.nxv64i1( , i32); -define i32 @intrinsic_vpopc_m_i64_nxv64i1( %0, i32 %1) nounwind { +define i32 @intrinsic_vpopc_m_i32_nxv64i1( %0, i32 %1) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1 +; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv64i1 ; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}} - %a = call i32 @llvm.riscv.vpopc.i64.nxv64i1( + %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i64.nxv64i1( +declare i32 @llvm.riscv.vpopc.mask.i32.nxv64i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i64_nxv64i1( %0, %1, i32 %2) nounwind { +define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1( %0, %1, i32 %2) nounwind { entry: -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1 +; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv64i1 ; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu ; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t - %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv64i1( + %a = call i32 
@llvm.riscv.vpopc.mask.i32.nxv64i1( %0, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll index 305efe8803618..ff7a2f858dc5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredand.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredand.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredand.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: 
ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredand.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredand.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredand.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll index eeeeb92424677..b2e8230ab22e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( 
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
@@ -734,7 +734,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
@@ -746,7 +746,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
@@ -776,7 +776,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
@@ -788,7 +788,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
@@ -818,7 +818,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
@@ -830,7 +830,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
@@ -860,7 +860,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
@@ -872,7 +872,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 5a2c7ab416c73..798211e73c359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index 939ae4599031a..9dbddfa17670c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
@@ -734,7 +734,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
@@ -746,7 +746,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
@@ -776,7 +776,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
@@ -788,7 +788,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
@@ -818,7 +818,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
@@ -830,7 +830,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
@@ -860,7 +860,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
@@ -872,7 +872,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index 734fc33ba4fd8..a25614f1f2137 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 68af5ac3897ad..f05d58998aa4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
@@ -734,7 +734,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
@@ -746,7 +746,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
@@ -776,7 +776,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
@@ -788,7 +788,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
@@ -818,7 +818,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
@@ -830,7 +830,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
@@ -860,7 +860,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
@@ -872,7 +872,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 6da1738afe206..732b354aff3c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index 6c6bc85ca1597..0f3ac5b216577 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -20,7 +20,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
@@ -32,7 +32,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
@@ -62,7 +62,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
@@ -74,7 +74,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
@@ -104,7 +104,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
@@ -116,7 +116,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
@@ -146,7 +146,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
@@ -158,7 +158,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
@@ -188,7 +188,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
@@ -200,7 +200,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
@@ -230,7 +230,7 @@ entry:
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
@@ -242,7 +242,7 @@ entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
@@ -272,7 +272,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
@@ -284,7 +284,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
@@ -314,7 +314,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
@@ -326,7 +326,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
@@ -356,7 +356,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
@@ -368,7 +368,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
@@ -398,7 +398,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
@@ -410,7 +410,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
@@ -440,7 +440,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
@@ -452,7 +452,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
@@ -482,7 +482,7 @@ entry:
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
@@ -494,7 +494,7 @@ entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
@@ -524,7 +524,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
@@ -536,7 +536,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
@@ -566,7 +566,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
@@ -578,7 +578,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
@@ -608,7 +608,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
@@ -620,7 +620,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
@@ -650,7 +650,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
@@ -662,7 +662,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
@@ -692,7 +692,7 @@ entry:
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
@@ -704,7 +704,7 @@ entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
@@ -734,7 +734,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
@@ -746,7 +746,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
@@ -776,7 +776,7 @@ entry:
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
@@ -788,7 +788,7 @@ entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1( + %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64( %0, %1, %2, @@ -818,7 +818,7 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1( +declare @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64( , , , @@ -830,7 +830,7 @@ entry: ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1( + %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64( %0, %1, %2, @@ -860,7 +860,7 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1( +declare @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64( , , , @@ -872,7 +872,7 @@ entry: ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1( + %a = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll index 9f26f5dea4916..66c3d07143528 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; 
CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll index 559a870c8354a..3fe847466db7b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( , , , @@ -74,7 
+74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredminu.vs 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( %0, %1, %2, @@ -734,7 +734,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1( +declare @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64( , , , @@ -746,7 +746,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1( + %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64( %0, %1, %2, @@ -776,7 +776,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1( +declare @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64( , , , @@ -788,7 +788,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1( + %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64( %0, %1, %2, @@ -818,7 +818,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1( +declare @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64( , , , @@ -830,7 +830,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1( + %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64( %0, %1, %2, @@ -860,7 +860,7 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1( +declare @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64( , , , @@ -872,7 +872,7 @@ entry: ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1( + %a = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll index 5b0e2f0996750..8aeb10b90e9c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } 
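; Illustrative sketch, not part of the original patch: every hunk in these
; vred* test diffs makes the same mechanical rename, dropping the trailing
; mask-type suffix (e.g. ".nxv8i1") from the masked reduction intrinsic
; name, since the mask's element count can be derived from the source
; vector type (visible throughout: .nxv8i8.nxv16i8.nxv16i1 becomes
; .nxv8i8.nxv16i8). Assuming the surrounding nxv4i16/nxv8i16 vredor case,
; the renamed declaration would read roughly as follows; the <vscale ...>
; parameter types are reconstructed from the type suffixes in the name,
; not copied from the diff, and the comments are editorial:
;
;   declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
;     <vscale x 4 x i16>,   ; merge/maskedoff operand
;     <vscale x 8 x i16>,   ; vector source operand (vs2)
;     <vscale x 4 x i16>,   ; scalar source held in element 0 (vs1)
;     <vscale x 8 x i1>,    ; mask, same element count as vs2
;     i32);                 ; vector length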
-declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll index 9b80220814114..caeda82fa5ae4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: 
intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( %0, %1, %2, @@ -734,7 +734,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1( +declare @llvm.riscv.vredor.mask.nxv1i64.nxv1i64( , , , @@ -746,7 +746,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1( + %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64( %0, %1, %2, @@ -776,7 +776,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1( +declare @llvm.riscv.vredor.mask.nxv1i64.nxv2i64( , , , @@ -788,7 +788,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1( + %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64( %0, %1, %2, @@ -818,7 +818,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1( +declare @llvm.riscv.vredor.mask.nxv1i64.nxv4i64( , , , @@ -830,7 +830,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1( + %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64( %0, %1, %2, @@ -860,7 +860,7 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1( +declare @llvm.riscv.vredor.mask.nxv1i64.nxv8i64( , , , @@ -872,7 +872,7 @@ entry: ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu ; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1( + %a = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll index b2b95979526d3..ddc920c681ac5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a 
} -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16( %0, %1, %2, @@ -524,7 +524,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1( +declare @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32( , , , @@ -536,7 +536,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1( + %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32( %0, %1, %2, @@ -566,7 +566,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1( +declare @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32( , , , @@ -578,7 +578,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1( + %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32( %0, %1, %2, @@ -608,7 +608,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1( +declare @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32( , , , @@ -620,7 +620,7 @@ entry: 
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1( + %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32( %0, %1, %2, @@ -650,7 +650,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1( +declare @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32( , , , @@ -662,7 +662,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1( + %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32( %0, %1, %2, @@ -692,7 +692,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1( +declare @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32( , , , @@ -704,7 +704,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1( + %a = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll index 7641b6ca0a4d9..68f8b173173f4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll @@ -20,7 +20,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( , , , @@ -32,7 +32,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( %0, %1, %2, @@ -62,7 +62,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( , , , @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( %0, %1, %2, @@ -104,7 +104,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( , , , @@ -116,7 +116,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( %0, %1, %2, @@ -146,7 +146,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( , , , @@ -158,7 +158,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( %0, %1, %2, @@ -188,7 +188,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1( +declare 
@llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( , , , @@ -200,7 +200,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( %0, %1, %2, @@ -230,7 +230,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1( +declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( , , , @@ -242,7 +242,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1( + %a = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( %0, %1, %2, @@ -272,7 +272,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( , , , @@ -284,7 +284,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( %0, %1, %2, @@ -314,7 +314,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( , , , @@ -326,7 +326,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( %0, %1, %2, @@ -356,7 +356,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( , , , @@ -368,7 +368,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( %0, %1, %2, @@ -398,7 +398,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( , , , @@ -410,7 +410,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( %0, %1, %2, @@ -440,7 +440,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( , , , @@ -452,7 +452,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1( + %a = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( %0, %1, %2, @@ -482,7 +482,7 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1( +declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16( , , , @@ -494,7 +494,7 @@ entry: ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16 ; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 3ea6a25320858..37aeb333a456d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index b61705eb81597..9683a3b290560 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,