12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -95,7 +95,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
%wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
%wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
ret <vscale x 16 x i8> %wide.masked.gather
}

@@ -121,7 +121,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
%wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
%wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
ret <vscale x 16 x i8> %wide.masked.gather
}

@@ -141,7 +141,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
%wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
%wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
ret <vscale x 8 x i16> %wide.masked.gather
}

@@ -161,7 +161,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
%wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
%wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
ret <vscale x 8 x i16> %wide.masked.gather
}

@@ -177,7 +177,7 @@ define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vs
%wide.load = load <vscale x 4 x i32>, ptr %2, align 1
%3 = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
%4 = getelementptr inbounds i32, ptr %in, <vscale x 4 x i64> %3
%wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> undef)
%wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> undef)
ret <vscale x 4 x i32> %wide.masked.gather
}

@@ -192,7 +192,7 @@ define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(ptr %out, ptr %in, <vs
%2 = bitcast ptr %1 to ptr
%wide.load = load <vscale x 2 x i64>, ptr %2, align 1
%3 = getelementptr inbounds i64, ptr %in, <vscale x 2 x i64> %wide.load
%wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
%wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> undef)
ret <vscale x 2 x i64> %wide.masked.gather
}

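For context, every updated call above swaps the old shufflevector/insertelement constant expression for LLVM's splat constant shorthand; the two spellings denote the same all-true scalable mask. A minimal standalone sketch, not taken from this patch (the function name and types are illustrative assumptions), of an all-active masked gather written with the shorthand:

; Sketch: gather i64 lanes through a vector of pointers, with the all-true
; scalable mask written as `splat (i1 true)`.
define <vscale x 2 x i64> @example_gather(<vscale x 2 x ptr> %ptrs) {
  %v = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)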
162 changes: 81 additions & 81 deletions llvm/test/CodeGen/AArch64/sve-hadd.ll

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
@@ -771,7 +771,7 @@ define <vscale x 4 x i32> @sdiv_const(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%div = sdiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 3, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer)
%div = sdiv <vscale x 4 x i32> %a, splat (i32 3)
ret <vscale x 4 x i32> %div
}

@@ -783,7 +783,7 @@ define <vscale x 4 x i32> @udiv_const(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%div = udiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 3, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer)
%div = udiv <vscale x 4 x i32> %a, splat (i32 3)
ret <vscale x 4 x i32> %div
}

@@ -795,9 +795,9 @@ define <vscale x 8 x i16> @uqsub(<vscale x 8 x i16> %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: uqsub z0.h, z0.h, #32768 // =0x8000
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 0, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
%sub = xor <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 -32768, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
%sel = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %sub, <vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 0, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
%cmp = icmp slt <vscale x 8 x i16> %a, zeroinitializer
%sub = xor <vscale x 8 x i16> %a, splat (i16 -32768)
%sel = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %sub, <vscale x 8 x i16> zeroinitializer
ret <vscale x 8 x i16> %sel
}

36 changes: 18 additions & 18 deletions llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -538,7 +538,7 @@ define <vscale x 2 x i64> @mls_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
; CHECK-NEXT: ret
{
%1 = mul <vscale x 2 x i64> %a, %b
%2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%2 = add <vscale x 2 x i64> %1, splat (i64 4294967295)
ret <vscale x 2 x i64> %2
}

@@ -551,7 +551,7 @@ define <vscale x 2 x i64> @muladd_i64_negativeAddend(<vscale x 2 x i64> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 2 x i64> %a, %b
%2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%2 = add <vscale x 2 x i64> %1, splat (i64 -4294967295)
ret <vscale x 2 x i64> %2
}

@@ -565,7 +565,7 @@ define <vscale x 4 x i32> @muladd_i32_positiveAddend(<vscale x 4 x i32> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 4 x i32> %a, %b
%2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%2 = add <vscale x 4 x i32> %1, splat (i32 65536)
ret <vscale x 4 x i32> %2
}

@@ -578,7 +578,7 @@ define <vscale x 4 x i32> @muladd_i32_negativeAddend(<vscale x 4 x i32> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 4 x i32> %a, %b
%2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%2 = add <vscale x 4 x i32> %1, splat (i32 -65536)
ret <vscale x 4 x i32> %2
}

@@ -591,7 +591,7 @@ define <vscale x 8 x i16> @muladd_i16_positiveAddend(<vscale x 8 x i16> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 8 x i16> %a, %b
%2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%2 = add <vscale x 8 x i16> %1, splat (i16 255)
ret <vscale x 8 x i16> %2
}

@@ -604,7 +604,7 @@ define <vscale x 8 x i16> @muladd_i16_negativeAddend(<vscale x 8 x i16> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 8 x i16> %a, %b
%2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%2 = add <vscale x 8 x i16> %1, splat (i16 -255)
ret <vscale x 8 x i16> %2
}

@@ -617,7 +617,7 @@ define <vscale x 16 x i8> @muladd_i8_positiveAddend(<vscale x 16 x i8> %a, <vsca
; CHECK-NEXT: ret
{
%1 = mul <vscale x 16 x i8> %a, %b
%2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%2 = add <vscale x 16 x i8> %1, splat (i8 15)
ret <vscale x 16 x i8> %2
}

@@ -630,7 +630,7 @@ define <vscale x 16 x i8> @muladd_i8_negativeAddend(<vscale x 16 x i8> %a, <vsca
; CHECK-NEXT: ret
{
%1 = mul <vscale x 16 x i8> %a, %b
%2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%2 = add <vscale x 16 x i8> %1, splat (i8 -15)
ret <vscale x 16 x i8> %2
}

@@ -644,7 +644,7 @@ define <vscale x 2 x i64> @mulsub_i64_positiveAddend(<vscale x 2 x i64> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 2 x i64> %a, %b
%2 = sub <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%2 = sub <vscale x 2 x i64> %1, splat (i64 4294967295)
ret <vscale x 2 x i64> %2
}

@@ -658,7 +658,7 @@ define <vscale x 2 x i64> @mulsub_i64_negativeAddend(<vscale x 2 x i64> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 2 x i64> %a, %b
%2 = sub <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%2 = sub <vscale x 2 x i64> %1, splat (i64 -4294967295)
ret <vscale x 2 x i64> %2
}

@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @mulsub_i32_positiveAddend(<vscale x 4 x i32> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 4 x i32> %a, %b
%2 = sub <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%2 = sub <vscale x 4 x i32> %1, splat (i32 65536)
ret <vscale x 4 x i32> %2
}

@@ -687,7 +687,7 @@ define <vscale x 4 x i32> @mulsub_i32_negativeAddend(<vscale x 4 x i32> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 4 x i32> %a, %b
%2 = sub <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%2 = sub <vscale x 4 x i32> %1, splat (i32 -65536)
ret <vscale x 4 x i32> %2
}

@@ -700,7 +700,7 @@ define <vscale x 8 x i16> @mulsub_i16_positiveAddend(<vscale x 8 x i16> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 8 x i16> %a, %b
%2 = sub <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%2 = sub <vscale x 8 x i16> %1, splat (i16 255)
ret <vscale x 8 x i16> %2
}

@@ -714,7 +714,7 @@ define <vscale x 8 x i16> @mulsub_i16_negativeAddend(<vscale x 8 x i16> %a, <vsc
; CHECK-NEXT: ret
{
%1 = mul <vscale x 8 x i16> %a, %b
%2 = sub <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%2 = sub <vscale x 8 x i16> %1, splat (i16 -255)
ret <vscale x 8 x i16> %2
}

@@ -727,7 +727,7 @@ define <vscale x 16 x i8> @mulsub_i8_positiveAddend(<vscale x 16 x i8> %a, <vsca
; CHECK-NEXT: ret
{
%1 = mul <vscale x 16 x i8> %a, %b
%2 = sub <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%2 = sub <vscale x 16 x i8> %1, splat (i8 15)
ret <vscale x 16 x i8> %2
}

@@ -740,7 +740,7 @@ define <vscale x 16 x i8> @mulsub_i8_negativeAddend(<vscale x 16 x i8> %a, <vsca
; CHECK-NEXT: ret
{
%1 = mul <vscale x 16 x i8> %a, %b
%2 = sub <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%2 = sub <vscale x 16 x i8> %1, splat (i8 -15)
ret <vscale x 16 x i8> %2
}

@@ -757,7 +757,7 @@ define <vscale x 8 x i16> @multiple_fused_ops(<vscale x 8 x i16> %a, <vscale x 8
; CHECK-NEXT: ret
{
%1 = mul <vscale x 8 x i16> %a, %b
%2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%2 = add <vscale x 8 x i16> %1, splat (i16 200)
%3 = mul <vscale x 8 x i16> %2, %a
%4 = sub <vscale x 8 x i16> %3, %b
ret <vscale x 8 x i16> %4
@@ -805,7 +805,7 @@ vector.body: ; preds = %vector.body, %for.b
%3 = getelementptr inbounds i32, ptr %src2, i64 %index
%wide.masked.load12 = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %3, i32 4, <vscale x 4 x i1> %active.lane.mask, <vscale x 4 x i32> poison)
%4 = mul nsw <vscale x 4 x i32> %wide.masked.load12, %wide.masked.load
%5 = add nsw <vscale x 4 x i32> %4, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%5 = add nsw <vscale x 4 x i32> %4, splat (i32 1)
%6 = getelementptr inbounds i32, ptr %dst, i64 %index
tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %5, ptr %6, i32 4, <vscale x 4 x i1> %active.lane.mask)
%index.next = add i64 %index, %1
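The same shorthand covers the integer addends in the arithmetic tests above: splat (i16 200) and friends stand in for the equivalent shufflevector/insertelement constant expressions. A small illustrative sketch, again assumed rather than taken from the patch, of a multiply-add whose addend is a splat constant:

; Sketch: multiply two scalable vectors and add 200 to every lane.
define <vscale x 8 x i16> @example_mla(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
  %mul = mul <vscale x 8 x i16> %a, %b
  %add = add <vscale x 8 x i16> %mul, splat (i16 200)
  ret <vscale x 8 x i16> %add
}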
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-knownbits.ll
@@ -6,8 +6,8 @@ define <vscale x 8 x i16> @test_knownzero(<vscale x 8 x i16> %x) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, #0 // =0x0
; CHECK-NEXT: ret
%a1 = shl <vscale x 8 x i16> %x, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%a2 = and <vscale x 8 x i16> %a1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%a1 = shl <vscale x 8 x i16> %x, splat (i16 8)
%a2 = and <vscale x 8 x i16> %a1, splat (i16 8)
ret <vscale x 8 x i16> %a2
}

12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sve-pred-selectop.ll
@@ -281,7 +281,7 @@ define <vscale x 4 x i32> @andnot_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x i32
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
%y1 = xor <vscale x 4 x i32> %y, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%y1 = xor <vscale x 4 x i32> %y, splat (i32 -1)
%a = and <vscale x 4 x i32> %x, %y1
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -297,7 +297,7 @@ define <vscale x 8 x i16> @andnot_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x i16
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
%y1 = xor <vscale x 8 x i16> %y, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%y1 = xor <vscale x 8 x i16> %y, splat (i16 -1)
%a = and <vscale x 8 x i16> %x, %y1
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -313,7 +313,7 @@ define <vscale x 16 x i8> @andnot_v16i8(<vscale x 16 x i8> %z, <vscale x 16 x i8
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
%y1 = xor <vscale x 16 x i8> %y, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%y1 = xor <vscale x 16 x i8> %y, splat (i8 -1)
%a = and <vscale x 16 x i8> %x, %y1
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -331,7 +331,7 @@ define <vscale x 4 x i32> @ornot_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
%y1 = xor <vscale x 4 x i32> %y, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%y1 = xor <vscale x 4 x i32> %y, splat (i32 -1)
%a = or <vscale x 4 x i32> %x, %y1
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -349,7 +349,7 @@ define <vscale x 8 x i16> @ornot_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
%y1 = xor <vscale x 8 x i16> %y, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%y1 = xor <vscale x 8 x i16> %y, splat (i16 -1)
%a = or <vscale x 8 x i16> %x, %y1
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -367,7 +367,7 @@ define <vscale x 16 x i8> @ornot_v16i8(<vscale x 16 x i8> %z, <vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
%y1 = xor <vscale x 16 x i8> %y, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%y1 = xor <vscale x 16 x i8> %y, splat (i8 -1)
%a = or <vscale x 16 x i8> %x, %y1
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
70 changes: 35 additions & 35 deletions llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/sve-sdiv-pow2.ll
@@ -9,7 +9,7 @@ define <vscale x 16 x i8> @sdiv_i8(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #4
; CHECK-NEXT: ret
%out = sdiv <vscale x 16 x i8> %a, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 16, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%out = sdiv <vscale x 16 x i8> %a, splat (i8 16)
ret <vscale x 16 x i8> %out
}

@@ -20,7 +20,7 @@ define <vscale x 16 x i8> @sdiv_i8_neg(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #6
; CHECK-NEXT: subr z0.b, z0.b, #0 // =0x0
; CHECK-NEXT: ret
%out = sdiv <vscale x 16 x i8> %a, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -64, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
%out = sdiv <vscale x 16 x i8> %a, splat (i8 -64)
ret <vscale x 16 x i8> %out
}

@@ -30,7 +30,7 @@ define <vscale x 8 x i16> @sdiv_i16(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #10
; CHECK-NEXT: ret
%out = sdiv <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 1024, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%out = sdiv <vscale x 8 x i16> %a, splat (i16 1024)
ret <vscale x 8 x i16> %out
}

@@ -41,7 +41,7 @@ define <vscale x 8 x i16> @sdiv_i16_neg(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #12
; CHECK-NEXT: subr z0.h, z0.h, #0 // =0x0
; CHECK-NEXT: ret
%out = sdiv <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -4096, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%out = sdiv <vscale x 8 x i16> %a, splat (i16 -4096)
ret <vscale x 8 x i16> %out
}

@@ -51,7 +51,7 @@ define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #23
; CHECK-NEXT: ret
%out = sdiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 8388608, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%out = sdiv <vscale x 4 x i32> %a, splat (i32 8388608)
ret <vscale x 4 x i32> %out
}

@@ -62,7 +62,7 @@ define <vscale x 4 x i32> @sdiv_i32_neg(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #25
; CHECK-NEXT: subr z0.s, z0.s, #0 // =0x0
; CHECK-NEXT: ret
%out = sdiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -33554432, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%out = sdiv <vscale x 4 x i32> %a, splat (i32 -33554432)
ret <vscale x 4 x i32> %out
}

@@ -72,7 +72,7 @@ define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #53
; CHECK-NEXT: ret
%out = sdiv <vscale x 2 x i64> %a, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 9007199254740992, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%out = sdiv <vscale x 2 x i64> %a, splat (i64 9007199254740992)
ret <vscale x 2 x i64> %out
}

@@ -83,7 +83,7 @@ define <vscale x 2 x i64> @sdiv_i64_neg(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #55
; CHECK-NEXT: subr z0.d, z0.d, #0 // =0x0
; CHECK-NEXT: ret
%out = sdiv <vscale x 2 x i64> %a, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -36028797018963968, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%out = sdiv <vscale x 2 x i64> %a, splat (i64 -36028797018963968)
ret <vscale x 2 x i64> %out
}

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-splat-sext.ll
@@ -8,8 +8,8 @@ define <vscale x 8 x i16> @sext_splat_v8i16_128() {
; CHECK-NEXT: ret
%i = insertelement <vscale x 8 x i16> poison, i16 128, i32 0
%s = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = shl <vscale x 8 x i16> %s, shufflevector (<vscale x 8 x i16> insertelement(<vscale x 8 x i16> undef, i16 8, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
%b = ashr <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement(<vscale x 8 x i16> undef, i16 8, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
%a = shl <vscale x 8 x i16> %s, splat (i16 8)
%b = ashr <vscale x 8 x i16> %a, splat (i16 8)
ret <vscale x 8 x i16> %b
}

2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/sve-srem-combine-loop.ll
@@ -12,7 +12,7 @@ define <vscale x 4 x i32> @srem_combine_loop(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: asrd z1.s, p0/m, z1.s, #1
; CHECK-NEXT: mls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
%rem = srem <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 2, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%rem = srem <vscale x 4 x i32> %a, splat (i32 2)
ret <vscale x 4 x i32> %rem
}

64 changes: 32 additions & 32 deletions llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/Generic/expand-vp-load-store.ll
@@ -127,7 +127,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64.p0(ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]], <vscale x 1 x i64> poison)
; CHECK-NEXT: ret <vscale x 1 x i64> [[TMP3]]
;
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %load
}

@@ -140,7 +140,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask_vscale(ptr %ptr) {
;
%vscale = call i32 @llvm.vscale.i32()
%vlmax = mul nuw i32 %vscale, 1
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
ret <vscale x 1 x i64> %load
}

@@ -179,7 +179,7 @@ define void @vpstore_nxv1i64_allones_mask(<vscale x 1 x i64> %val, ptr %ptr, i32
; CHECK-NEXT: call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> [[VAL:%.*]], ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]])
; CHECK-NEXT: ret void
;
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret void
}

@@ -192,7 +192,7 @@ define void @vpstore_nxv1i64_allones_mask_vscale(<vscale x 1 x i64> %val, ptr %p
;
%vscale = call i32 @llvm.vscale.i32()
%vlmax = mul nuw i32 %vscale, 1
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
ret void
}

16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
@@ -606,7 +606,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ult_two(<vscale x 16 x i32> %va) {
; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
%cmp = icmp ult <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 2, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
%cmp = icmp ult <vscale x 16 x i32> %a, splat (i32 2)
ret <vscale x 16 x i1> %cmp
}

@@ -626,7 +626,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ugt_one(<vscale x 16 x i32> %va) {
; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
%cmp = icmp ugt <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
%cmp = icmp ugt <vscale x 16 x i32> %a, splat (i32 1)
ret <vscale x 16 x i1> %cmp
}

@@ -646,7 +646,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_eq_one(<vscale x 16 x i32> %va) {
; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
%cmp = icmp eq <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
%cmp = icmp eq <vscale x 16 x i32> %a, splat (i32 1)
ret <vscale x 16 x i1> %cmp
}

@@ -666,7 +666,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ne_one(<vscale x 16 x i32> %va) {
; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
%cmp = icmp ne <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
%cmp = icmp ne <vscale x 16 x i32> %a, splat (i32 1)
ret <vscale x 16 x i1> %cmp
}

@@ -1020,7 +1020,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ult_two(<vscale x 8 x i64> %va) {
; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
%cmp = icmp ult <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 2, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%cmp = icmp ult <vscale x 8 x i64> %a, splat (i64 2)
ret <vscale x 8 x i1> %cmp
}

@@ -1040,7 +1040,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ugt_one(<vscale x 8 x i64> %va) {
; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
%cmp = icmp ugt <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%cmp = icmp ugt <vscale x 8 x i64> %a, splat (i64 1)
ret <vscale x 8 x i1> %cmp
}

@@ -1060,7 +1060,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_eq_one(<vscale x 8 x i64> %va) {
; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
%cmp = icmp eq <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%cmp = icmp eq <vscale x 8 x i64> %a, splat (i64 1)
ret <vscale x 8 x i1> %cmp
}

@@ -1080,7 +1080,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ne_one(<vscale x 8 x i64> %va) {
; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1
; CHECK-ZVBB-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
%cmp = icmp ne <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%cmp = icmp ne <vscale x 8 x i64> %a, splat (i64 1)
ret <vscale x 8 x i1> %cmp
}

6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
@@ -58,7 +58,7 @@ define void @strided_store_zero_start(i64 %n, ptr %p) {
; RV64-NEXT: ret
%step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %step, i32 6
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
ret void
}

@@ -93,7 +93,7 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
%.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%add = add <vscale x 1 x i64> %step, %.splat
%gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %add, i32 6
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
ret void
}

@@ -118,7 +118,7 @@ define void @stride_one_store(i64 %n, ptr %p) {
; RV64-NEXT: ret
%step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%gep = getelementptr inbounds i64, ptr %p, <vscale x 1 x i64> %step
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
ret void
}

10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/rvv/pr61561.ll
@@ -23,11 +23,11 @@ define <vscale x 4 x i8> @foo(ptr %p) {
; CHECK-NEXT: ret
%i13 = load <vscale x 4 x i16>, ptr %p, align 2
%i14 = zext <vscale x 4 x i16> %i13 to <vscale x 4 x i32>
%i15 = shl nuw nsw <vscale x 4 x i32> %i14, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%i16 = and <vscale x 4 x i32> %i15, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 248, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%i17 = mul nuw nsw <vscale x 4 x i32> %i16, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3735, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%i18 = add nuw nsw <vscale x 4 x i32> %i17, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 16384, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%i21 = lshr <vscale x 4 x i32> %i18, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 15, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%i15 = shl nuw nsw <vscale x 4 x i32> %i14, splat (i32 3)
%i16 = and <vscale x 4 x i32> %i15, splat (i32 248)
%i17 = mul nuw nsw <vscale x 4 x i32> %i16, splat (i32 3735)
%i18 = add nuw nsw <vscale x 4 x i32> %i17, splat (i32 16384)
%i21 = lshr <vscale x 4 x i32> %i18, splat (i32 15)
%i22 = trunc <vscale x 4 x i32> %i21 to <vscale x 4 x i8>
ret <vscale x 4 x i8> %i22
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/pr63459.ll
@@ -14,7 +14,7 @@ define void @snork(ptr %arg, <vscale x 2 x i64> %arg1) {
; CHECK-NEXT: ret
bb:
%getelementptr = getelementptr inbounds <vscale x 2 x i32>, ptr %arg, <vscale x 2 x i64> %arg1
tail call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x ptr> align 4 %getelementptr, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 4)
tail call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> splat (i32 1), <vscale x 2 x ptr> align 4 %getelementptr, <vscale x 2 x i1> splat (i1 true), i32 4)
ret void
}

10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -743,7 +743,7 @@ define <vscale x 2 x i64> @hi_bits_known_zero() vscale_range(2, 4) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: ret
%step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
%and = and <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xfffffffffffffff8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%and = and <vscale x 2 x i64> %step, splat (i64 u0xfffffffffffffff8)
ret <vscale x 2 x i64> %and
}

@@ -758,8 +758,8 @@ define <vscale x 2 x i64> @hi_bits_known_zero_overflow() vscale_range(2, 4) {
; CHECK-NEXT: vand.vi v8, v8, -8
; CHECK-NEXT: ret
%step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
%step.mul = mul <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xffffffffffffffff, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%and = and <vscale x 2 x i64> %step.mul, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xfffffffffffffff8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%step.mul = mul <vscale x 2 x i64> %step, splat (i64 u0xffffffffffffffff)
%and = and <vscale x 2 x i64> %step.mul, splat (i64 u0xfffffffffffffff8)
ret <vscale x 2 x i64> %and
}

@@ -771,7 +771,7 @@ define <vscale x 2 x i64> @lo_bits_known_zero() {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: ret
%step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
%step.mul = mul <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%and = and <vscale x 2 x i64> %step.mul, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 7, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
%step.mul = mul <vscale x 2 x i64> %step, splat (i64 8)
%and = and <vscale x 2 x i64> %step.mul, splat (i64 7)
ret <vscale x 2 x i64> %and
}
38 changes: 19 additions & 19 deletions llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
@@ -16,7 +16,7 @@ define <vscale x 1 x i64> @gather(ptr %a, i32 %len) {
; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACCUM:%.*]] = phi <vscale x 1 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3
; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr [[TMP1]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr [[TMP1]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: [[ACCUM_NEXT]] = add <vscale x 1 x i64> [[ACCUM]], [[GATHER]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP0]]
; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[TMP0]]
@@ -38,7 +38,7 @@ vector.body: ; preds = %vector.body, %vecto
%vec.ind = phi <vscale x 1 x i64> [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ]
%accum = phi <vscale x 1 x i64> [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ]
%2 = getelementptr inbounds %struct.foo, ptr %a, <vscale x 1 x i64> %vec.ind, i32 3
%gather = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> %2, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> undef)
%gather = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> %2, i32 8, <vscale x 1 x i1> splat (i1 true), <vscale x 1 x i64> undef)
%accum.next = add <vscale x 1 x i64> %accum, %gather
%index.next = add nuw i64 %index, %0
%vec.ind.next = add <vscale x 1 x i64> %vec.ind, %.splat
@@ -59,7 +59,7 @@ define <vscale x 1 x i64> @gather_disjoint_or(ptr %a, i64 %len) {
; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 1, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACCUM:%.*]] = phi <vscale x 1 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]]
; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> poison, ptr [[TMP0]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> poison, ptr [[TMP0]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: [[ACCUM_NEXT]] = add <vscale x 1 x i64> [[ACCUM]], [[GATHER]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[VSCALE]]
; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], 2
@@ -71,7 +71,7 @@ define <vscale x 1 x i64> @gather_disjoint_or(ptr %a, i64 %len) {
vector.ph:
%vscale = call i64 @llvm.vscale.i64()
%step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%step.mul2 = shl <vscale x 1 x i64> %step, shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
%step.mul2 = shl <vscale x 1 x i64> %step, splat (i64 1)
br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
@@ -80,19 +80,19 @@ vector.body: ; preds = %vector.body, %vecto

%accum = phi <vscale x 1 x i64> [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ]

%vec.ind.or = or disjoint <vscale x 1 x i64> %vec.ind, shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 1, i64 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
%vec.ind.or = or disjoint <vscale x 1 x i64> %vec.ind, splat (i64 1)

%gep = getelementptr i64, ptr %a, <vscale x 1 x i64> %vec.ind.or
%gather = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %gep,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 true),
<vscale x 1 x i64> poison
)

%accum.next = add <vscale x 1 x i64> %accum, %gather
%index.next = add nuw i64 %index, %vscale
%vec.ind.next = add <vscale x 1 x i64> %vec.ind, shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 2, i64 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
%vec.ind.next = add <vscale x 1 x i64> %vec.ind, splat (i64 2)

%exit = icmp ne i64 %index.next, %len
br i1 %exit, label %for.cond.cleanup, label %vector.body
@@ -111,7 +111,7 @@ define void @scatter(ptr %a, i32 %len) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3
; CHECK-NEXT: call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> zeroinitializer, ptr [[TMP1]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> zeroinitializer, ptr [[TMP1]], i64 16, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP0]]
; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[INDEX_NEXT]], [[WIDE_TRIP_COUNT]]
@@ -131,7 +131,7 @@ vector.body: ; preds = %vector.body, %vecto
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%vec.ind = phi <vscale x 1 x i64> [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ]
%2 = getelementptr inbounds %struct.foo, ptr %a, <vscale x 1 x i64> %vec.ind, i32 3
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %2, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %2, i32 8, <vscale x 1 x i1> splat (i1 true))
%index.next = add nuw i64 %index, %0
%vec.ind.next = add <vscale x 1 x i64> %vec.ind, %.splat
%3 = icmp ne i64 %index.next, %wide.trip.count
@@ -155,7 +155,7 @@ define <vscale x 1 x i64> @gather_loopless(ptr %p, i64 %stride) {
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 1),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -175,7 +175,7 @@ define <vscale x 1 x i64> @straightline_offset_add(ptr %p, i64 %offset) {
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 1),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -188,13 +188,13 @@ define <vscale x 1 x i64> @straightline_offset_disjoint_or(ptr %p, i64 %offset)
; CHECK-NEXT: ret <vscale x 1 x i64> [[X]]
;
%step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%step.shl = shl <vscale x 1 x i64> %step, shufflevector (<vscale x 1 x i64> insertelement (<vscale x 1 x i64> poison, i64 1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
%offsetv = or disjoint <vscale x 1 x i64> %step.shl, shufflevector (<vscale x 1 x i64> insertelement (<vscale x 1 x i64> poison, i64 1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
%step.shl = shl <vscale x 1 x i64> %step, splat (i64 1)
%offsetv = or disjoint <vscale x 1 x i64> %step.shl, splat (i64 1)
%ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offsetv
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 true),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -213,7 +213,7 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 1),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -237,7 +237,7 @@ define <vscale x 1 x i64> @neg_shl_is_not_commutative(ptr %p) {
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 1),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -258,7 +258,7 @@ define <vscale x 1 x i64> @straightline_offset_shl_nonc(ptr %p, i64 %shift) {
%x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer),
<vscale x 1 x i1> splat (i1 1),
<vscale x 1 x i64> poison
)
ret <vscale x 1 x i64> %x
@@ -279,7 +279,7 @@ define void @scatter_loopless(<vscale x 1 x i64> %x, ptr %p, i64 %stride) {
<vscale x 1 x i64> %x,
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer)
<vscale x 1 x i1> splat (i1 1)
)
ret void
}
@@ -296,7 +296,7 @@ define void @constant_stride(<vscale x 1 x i64> %x, ptr %p, i64 %stride) {
<vscale x 1 x i64> %x,
<vscale x 1 x ptr> %ptrs,
i32 8,
<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 1, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer)
<vscale x 1 x i1> splat (i1 1)
)
ret void
}
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -79,18 +79,18 @@ start:
Cond1: ; preds = %start
%v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
%v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
%vs12.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
%v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
%vs16.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs16.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 3)
%v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
br label %UseSR

Cond2: ; preds = %start
%v15.2 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
%v17.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15.2, i64 1)
%vs12.i.i.i.2 = add <vscale x 1 x i16> %v15.2, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs12.i.i.i.2 = add <vscale x 1 x i16> %v15.2, splat (i16 1)
%v18.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i.2, i64 1)
%vs16.i.i.i.2 = add <vscale x 1 x i16> %v15.2, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs16.i.i.i.2 = add <vscale x 1 x i16> %v15.2, splat (i16 3)
%v20.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i.2, i64 1)
br label %UseSR

loopIR.preheader.i.i:
%v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
%v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
%vs12.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
%v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
%vs16.i.i.i = add <vscale x 1 x i16> %v15, shufflevector (<vscale x 1 x i16> insertelement (<vscale x 1 x i16> poison, i16 3, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer)
%vs16.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 3)
%v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
br label %loopIR3.i.i

88 changes: 44 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -24,7 +24,7 @@ define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32_unmasked(<vscale x 2 x half>
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x float> %v
}

@@ -50,7 +50,7 @@ define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64_unmasked(<vscale x 2 x half
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x double> %v
}

@@ -74,7 +74,7 @@ define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64_unmasked(<vscale x 2 x floa
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x double> %v
}

Expand Down
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %v
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}

Expand All @@ -66,7 +66,7 @@ define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}

Expand All @@ -91,6 +91,6 @@ define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand Down Expand Up @@ -105,7 +105,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand Down Expand Up @@ -145,7 +145,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand Down Expand Up @@ -187,7 +187,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.x.f.v v8, v10
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand All @@ -213,7 +213,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand All @@ -237,7 +237,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -259,7 +259,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -283,7 +283,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand Down Expand Up @@ -313,7 +313,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand All @@ -339,7 +339,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -363,7 +363,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -385,7 +385,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand Down Expand Up @@ -466,6 +466,6 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32_unmasked(<vscale x 32 x fl
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x i32> %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %v
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}

Expand All @@ -66,7 +66,7 @@ define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}

Expand All @@ -91,6 +91,6 @@ define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %v
}
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand Down Expand Up @@ -105,7 +105,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand Down Expand Up @@ -145,7 +145,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand Down Expand Up @@ -187,7 +187,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half>
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.xu.f.v v8, v10
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand All @@ -213,7 +213,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand All @@ -237,7 +237,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -259,7 +259,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -283,7 +283,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float>
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand Down Expand Up @@ -313,7 +313,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}

Expand All @@ -339,7 +339,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -363,7 +363,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -385,7 +385,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

Expand Down Expand Up @@ -466,6 +466,6 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32_unmasked(<vscale x 32 x fl
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x i32> %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f32_unmasked(<vscale x 2 x floa
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x half> %v
}

Expand All @@ -50,7 +50,7 @@ define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f64_unmasked(<vscale x 2 x doub
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x half> %v
}

Expand All @@ -74,7 +74,7 @@ define <vscale x 2 x float> @vfptrunc_nxv2f32_nxv2f64_unmasked(<vscale x 2 x dou
; CHECK-NEXT: vfncvt.f.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x float> %v
}

Expand Down
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ define <vscale x 2 x float> @vfwadd_same_operand(<vscale x 2 x half> %arg, i32 s
; ZVFHMIN-NEXT: vfadd.vv v8, v9, v9
; ZVFHMIN-NEXT: ret
bb:
%tmp = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %arg, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%tmp2 = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %tmp, <vscale x 2 x float> %tmp, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%tmp = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %arg, <vscale x 2 x i1> splat (i1 true), i32 %vl)
%tmp2 = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %tmp, <vscale x 2 x float> %tmp, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x float> %tmp2
}

Expand All @@ -48,9 +48,9 @@ define <vscale x 2 x float> @vfwadd_tu(<vscale x 2 x half> %arg, <vscale x 2 x f
; ZVFHMIN-NEXT: vmv1r.v v8, v9
; ZVFHMIN-NEXT: ret
bb:
%tmp = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %arg, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 %arg2)
%tmp3 = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %arg1, <vscale x 2 x float> %tmp, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 %arg2)
%tmp4 = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x float> %tmp3, <vscale x 2 x float> %arg1, i32 %arg2)
%tmp = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %arg, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
%tmp3 = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %arg1, <vscale x 2 x float> %tmp, <vscale x 2 x i1> splat (i1 true), i32 %arg2)
%tmp4 = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> %tmp3, <vscale x 2 x float> %arg1, i32 %arg2)
ret <vscale x 2 x float> %tmp4
}

Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
Original file line number Diff line number Diff line change
Expand Up @@ -597,7 +597,7 @@ define <vscale x 4 x i32> @combine_mul_add_imm1(<vscale x 4 x i32> %a, <vscale x
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmadd.vv v8, v10, v10
; CHECK-NEXT: ret
%x = add <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%x = add <vscale x 4 x i32> %a, splat (i32 1)
%y = mul <vscale x 4 x i32> %x, %b
ret <vscale x 4 x i32> %y
}
Expand All @@ -608,7 +608,7 @@ define <vscale x 4 x i32> @combine_mul_add_imm1_2(<vscale x 4 x i32> %a, <vscale
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmadd.vv v8, v10, v10
; CHECK-NEXT: ret
%x = add <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
%x = add <vscale x 4 x i32> %a, splat (i32 1)
%y = mul <vscale x 4 x i32> %b, %x
ret <vscale x 4 x i32> %y
}
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
Original file line number Diff line number Diff line change
Expand Up @@ -597,7 +597,7 @@ define <vscale x 4 x i32> @combine_mul_sub_imm1(<vscale x 4 x i32> %a, <vscale x
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vnmsub.vv v8, v10, v10
; CHECK-NEXT: ret
%x = sub <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), %a
%x = sub <vscale x 4 x i32> splat (i32 1), %a
%y = mul <vscale x 4 x i32> %x, %b
ret <vscale x 4 x i32> %y
}
Expand All @@ -608,7 +608,7 @@ define <vscale x 4 x i32> @combine_mul_sub_imm1_2(<vscale x 4 x i32> %a, <vscale
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vnmsub.vv v8, v10, v10
; CHECK-NEXT: ret
%x = sub <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), %a
%x = sub <vscale x 4 x i32> splat (i32 1), %a
%y = mul <vscale x 4 x i32> %b, %x
ret <vscale x 4 x i32> %y
}
88 changes: 44 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -762,7 +762,7 @@ define <vscale x 2 x i1> @select_one(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y,
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmorn.mm v0, v8, v0
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %a
}

Expand All @@ -782,7 +782,7 @@ define <vscale x 2 x i1> @select_x_one(<vscale x 2 x i1> %x, <vscale x 2 x i1> %
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmorn.mm v0, v8, v0
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i1> %a
}

Expand All @@ -802,7 +802,7 @@ define <vscale x 2 x i1> @select_one_x(<vscale x 2 x i1> %x, <vscale x 2 x i1> %
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmor.mm v0, v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %y, i32 %evl)
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i1> %y, i32 %evl)
ret <vscale x 2 x i1> %a
}

Expand Down
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ define <vscale x 2 x i16> @vsext_nxv2i1_nxv2i16_unmasked(<vscale x 2 x i1> %a, i
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.sext.nxv2i16.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i16> @llvm.vp.sext.nxv2i16.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -46,7 +46,7 @@ define <vscale x 2 x i32> @vsext_nxv2i1_nxv2i32_unmasked(<vscale x 2 x i1> %a, i
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -70,6 +70,6 @@ define <vscale x 2 x i64> @vsext_nxv2i1_nxv2i64_unmasked(<vscale x 2 x i1> %a, i
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i64> %v
}
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ define <vscale x 2 x i16> @vsext_nxv2i8_nxv2i16_unmasked(<vscale x 2 x i8> %a, i
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.sext.nxv2i16.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i16> @llvm.vp.sext.nxv2i16.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i16> %v
}

Expand All @@ -46,7 +46,7 @@ define <vscale x 2 x i32> @vsext_nxv2i8_nxv2i32_unmasked(<vscale x 2 x i8> %a, i
; CHECK-NEXT: vsext.vf4 v9, v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -70,7 +70,7 @@ define <vscale x 2 x i64> @vsext_nxv2i8_nxv2i64_unmasked(<vscale x 2 x i8> %a, i
; CHECK-NEXT: vsext.vf8 v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i64> %v
}

Expand All @@ -94,7 +94,7 @@ define <vscale x 2 x i32> @vsext_nxv2i16_nxv2i32_unmasked(<vscale x 2 x i16> %a,
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i32> @llvm.vp.sext.nxv2i32.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i32> %v
}

Expand All @@ -118,7 +118,7 @@ define <vscale x 2 x i64> @vsext_nxv2i16_nxv2i64_unmasked(<vscale x 2 x i16> %a,
; CHECK-NEXT: vsext.vf4 v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i64> %v
}

Expand All @@ -142,7 +142,7 @@ define <vscale x 2 x i64> @vsext_nxv2i32_nxv2i64_unmasked(<vscale x 2 x i32> %a,
; CHECK-NEXT: vsext.vf2 v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
ret <vscale x 2 x i64> %v
}

Expand Down Expand Up @@ -195,6 +195,6 @@ define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32_unmasked(<vscale x 32 x i8> %
; CHECK-NEXT: vsext.vf4 v24, v8
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %vl)
%v = call <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> splat (i1 true), i32 %vl)
ret <vscale x 32 x i32> %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i1_unmasked(<vscale x 2 x i1> %v
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}

Expand All @@ -52,7 +52,7 @@ define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i1_unmasked(<vscale x 2 x i1> %
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x float> %v
}

Expand All @@ -79,6 +79,6 @@ define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i1_unmasked(<vscale x 2 x i1>
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i8_unmasked(<vscale x 2 x i8> %v
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}

Expand Down Expand Up @@ -107,7 +107,7 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i16_unmasked(<vscale x 2 x i16>
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}

Expand Down Expand Up @@ -147,7 +147,7 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i32_unmasked(<vscale x 2 x i32>
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}

Expand Down Expand Up @@ -189,7 +189,7 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i64_unmasked(<vscale x 2 x i64>
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}

Expand All @@ -213,7 +213,7 @@ define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i8_unmasked(<vscale x 2 x i8> %
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vfwcvt.f.x.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x float> %v
}

Expand All @@ -237,7 +237,7 @@ define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i16_unmasked(<vscale x 2 x i16>
; CHECK-NEXT: vfwcvt.f.x.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x float> %v
}

Expand All @@ -259,7 +259,7 @@ define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i32_unmasked(<vscale x 2 x i32>
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x float> %v
}

Expand All @@ -283,7 +283,7 @@ define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i64_unmasked(<vscale x 2 x i64>
; CHECK-NEXT: vfncvt.f.x.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x float> %v
}

Expand All @@ -307,7 +307,7 @@ define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i8_unmasked(<vscale x 2 x i8>
; CHECK-NEXT: vsext.vf4 v10, v8
; CHECK-NEXT: vfwcvt.f.x.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}

Expand All @@ -331,7 +331,7 @@ define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i16_unmasked(<vscale x 2 x i16
; CHECK-NEXT: vsext.vf2 v10, v8
; CHECK-NEXT: vfwcvt.f.x.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}

Expand All @@ -355,7 +355,7 @@ define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i32_unmasked(<vscale x 2 x i32
; CHECK-NEXT: vfwcvt.f.x.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}

Expand All @@ -377,7 +377,7 @@ define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i64_unmasked(<vscale x 2 x i64
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}

Expand Down Expand Up @@ -486,6 +486,6 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
%v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x float> %v
}
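For reference, every change in these test files is the same mechanical rewrite: a uniform vector constant (most often the all-true mask) previously spelled as a shufflevector-of-insertelement constant expression is now spelled with the splat keyword, which denotes the identical scalable-vector constant. Below is a minimal sketch of the new spelling, assuming an LLVM version that accepts splat constants; the function @splat_syntax_example and its use of @llvm.vp.add are illustrative only and do not appear in this diff.

define <vscale x 2 x i32> @splat_syntax_example(<vscale x 2 x i32> %a, i32 %evl) {
  ; splat (i1 true) denotes the same all-true scalable mask that these tests
  ; previously wrote as
  ;   shufflevector (insertelement (poison, i1 true, i32 0), poison, zeroinitializer)
  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ; splat also covers non-boolean element types, e.g. adding 1 to every lane.
  %inc = add <vscale x 2 x i32> %v, splat (i32 1)
  ret <vscale x 2 x i32> %inc
}
declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)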