llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll (34 additions, 34 deletions)
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll (42 additions, 42 deletions)
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll (34 additions, 34 deletions)
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll (42 additions, 42 deletions)
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll (34 additions, 34 deletions)
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -20,7 +20,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8>,
<vscale x 1 x i8>,
<vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
<vscale x 8 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8>,
<vscale x 2 x i8>,
<vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
<vscale x 8 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8>,
<vscale x 4 x i8>,
<vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
<vscale x 8 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8>,
<vscale x 16 x i8>,
<vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
<vscale x 8 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8>,
<vscale x 32 x i8>,
<vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
%a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
<vscale x 8 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16>,
<vscale x 1 x i16>,
<vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
<vscale x 4 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16>,
<vscale x 2 x i16>,
<vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
<vscale x 4 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16>,
<vscale x 8 x i16>,
<vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
<vscale x 4 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16>,
<vscale x 16 x i16>,
<vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
<vscale x 4 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16>,
<vscale x 32 x i16>,
<vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
%a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
<vscale x 4 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32>,
<vscale x 1 x i32>,
<vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
<vscale x 2 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32>,
<vscale x 4 x i32>,
<vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
<vscale x 2 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32>,
<vscale x 8 x i32>,
<vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
<vscale x 2 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32>,
<vscale x 16 x i32>,
<vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
%a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
<vscale x 2 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 2 x i32> %2,