Skip to content

Commit

Permalink
[LLVM][test] Convert remaining instances of ConstantExpr-based splats…
Browse files Browse the repository at this point in the history
… to use splat().

This is mostly NFC but some output does change due to consistently
inserting into poison rather than undef and using i64 as the index
type for inserts.
  • Loading branch information
paulwalker-arm committed Feb 27, 2024
1 parent 6a17929 commit 900bea9
Show file tree
Hide file tree
Showing 5 changed files with 132 additions and 132 deletions.
28 changes: 14 additions & 14 deletions llvm/test/Analysis/CostModel/AArch64/ext-rhadd.ll
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,9 @@ define void @srhadd_i8_sext_i16_scalable(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 16 x i8>, ptr %b
%ext1 = sext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
%ext2 = sext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
store <vscale x 16 x i8> %trunc, ptr %a
ret void
Expand All @@ -58,9 +58,9 @@ define void @srhadd_i16_sext_i64_scalable(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 8 x i16>, ptr %b
%ext1 = sext <vscale x 8 x i16> %ld1 to <vscale x 8 x i64>
%ext2 = sext <vscale x 8 x i16> %ld2 to <vscale x 8 x i64>
%add1 = add nuw nsw <vscale x 8 x i64> %ext1, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 8 x i64> %ext1, splat (i64 1)
%add2 = add nuw nsw <vscale x 8 x i64> %add1, %ext2
%shr = lshr <vscale x 8 x i64> %add2, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
%shr = lshr <vscale x 8 x i64> %add2, splat (i64 1)
%trunc = trunc <vscale x 8 x i64> %shr to <vscale x 8 x i16>
store <vscale x 8 x i16> %trunc, ptr %a
ret void
Expand Down Expand Up @@ -102,9 +102,9 @@ define void @urhadd_i8_zext_i64(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 16 x i8>, ptr %b
%ext1 = zext <vscale x 16 x i8> %ld1 to <vscale x 16 x i64>
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i64>
%add1 = add nuw nsw <vscale x 16 x i64> %ext1, shufflevector (<vscale x 16 x i64> insertelement (<vscale x 16 x i64> poison, i64 1, i64 0), <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 16 x i64> %ext1, splat (i64 1)
%add2 = add nuw nsw <vscale x 16 x i64> %add1, %ext2
%shr = lshr <vscale x 16 x i64> %add2, shufflevector (<vscale x 16 x i64> insertelement (<vscale x 16 x i64> poison, i64 1, i64 0), <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer)
%shr = lshr <vscale x 16 x i64> %add2, splat (i64 1)
%trunc = trunc <vscale x 16 x i64> %shr to <vscale x 16 x i8>
store <vscale x 16 x i8> %trunc, ptr %a
ret void
Expand All @@ -123,9 +123,9 @@ define void @urhadd_i16_zext_i32(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 8 x i16>, ptr %b
%ext1 = zext <vscale x 8 x i16> %ld1 to <vscale x 8 x i32>
%ext2 = zext <vscale x 8 x i16> %ld2 to <vscale x 8 x i32>
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, splat (i32 1)
%add2 = add nuw nsw <vscale x 8 x i32> %add1, %ext2
%shr = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
%shr = lshr <vscale x 8 x i32> %add2, splat (i32 1)
%trunc = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
store <vscale x 8 x i16> %trunc, ptr %a
ret void
Expand All @@ -146,9 +146,9 @@ define void @ext_operand_mismatch(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 16 x i8>, ptr %b
%ext1 = sext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
store <vscale x 16 x i8> %trunc, ptr %a
ret void
Expand All @@ -167,9 +167,9 @@ define void @add_multiple_uses(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 8 x i16>, ptr %b
%ext1 = sext <vscale x 8 x i16> %ld1 to <vscale x 8 x i32>
%ext2 = sext <vscale x 8 x i16> %ld2 to <vscale x 8 x i32>
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, splat (i32 1)
%add2 = add nuw nsw <vscale x 8 x i32> %add1, %ext2
%shr = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
%shr = lshr <vscale x 8 x i32> %add2, splat (i32 1)
%trunc = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
%add.res = add nuw nsw <vscale x 8 x i32> %add1, %add2
%res = trunc <vscale x 8 x i32> %add.res to <vscale x 8 x i16>
Expand All @@ -190,9 +190,9 @@ define void @shift_multiple_uses(ptr %a, ptr %b, ptr %dst) {
%ld2 = load <vscale x 16 x i8>, ptr %b
%ext1 = zext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
%add3 = add nuw nsw <vscale x 16 x i16> %shr, %add2
%res = trunc <vscale x 16 x i16> %add3 to <vscale x 16 x i8>
Expand Down
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/Generic/expand-vp-load-store.ll
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64.p0(ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]], <vscale x 1 x i64> poison)
; CHECK-NEXT: ret <vscale x 1 x i64> [[TMP3]]
;
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %load
}

Expand All @@ -140,7 +140,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask_vscale(ptr %ptr) {
;
%vscale = call i32 @llvm.vscale.i32()
%vlmax = mul nuw i32 %vscale, 1
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
ret <vscale x 1 x i64> %load
}

Expand Down Expand Up @@ -179,7 +179,7 @@ define void @vpstore_nxv1i64_allones_mask(<vscale x 1 x i64> %val, ptr %ptr, i32
; CHECK-NEXT: call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> [[VAL:%.*]], ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]])
; CHECK-NEXT: ret void
;
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret void
}

Expand All @@ -192,7 +192,7 @@ define void @vpstore_nxv1i64_allones_mask_vscale(<vscale x 1 x i64> %val, ptr %p
;
%vscale = call i32 @llvm.vscale.i32()
%vlmax = mul nuw i32 %vscale, 1
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
ret void
}

Expand Down
4 changes: 2 additions & 2 deletions llvm/test/Transforms/AggressiveInstCombine/vector-or-load.ll
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ define <vscale x 8 x i16> @or-load-scalable-vector(ptr %p1) {
; CHECK-NEXT: [[L2:%.*]] = load <vscale x 8 x i8>, ptr [[P2]], align 1
; CHECK-NEXT: [[E1:%.*]] = zext <vscale x 8 x i8> [[L1]] to <vscale x 8 x i16>
; CHECK-NEXT: [[E2:%.*]] = zext <vscale x 8 x i8> [[L2]] to <vscale x 8 x i16>
; CHECK-NEXT: [[S2:%.*]] = shl <vscale x 8 x i16> [[E2]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: [[S2:%.*]] = shl <vscale x 8 x i16> [[E2]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: [[OR:%.*]] = or <vscale x 8 x i16> [[E1]], [[S2]]
; CHECK-NEXT: ret <vscale x 8 x i16> [[OR]]
;
Expand All @@ -44,7 +44,7 @@ define <vscale x 8 x i16> @or-load-scalable-vector(ptr %p1) {
%l2 = load <vscale x 8 x i8>, ptr %p2, align 1
%e1 = zext <vscale x 8 x i8> %l1 to <vscale x 8 x i16>
%e2 = zext <vscale x 8 x i8> %l2 to <vscale x 8 x i16>
%s2 = shl <vscale x 8 x i16> %e2, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
%s2 = shl <vscale x 8 x i16> %e2, splat (i16 8)
%or = or <vscale x 8 x i16> %e1, %s2
ret <vscale x 8 x i16> %or
}
4 changes: 2 additions & 2 deletions llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
Original file line number Diff line number Diff line change
Expand Up @@ -71,15 +71,15 @@ define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
; CHECK-NEXT: [[ALLOC:%.*]] = alloca <vscale x 4 x float>, align 16
; CHECK-NEXT: [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
; CHECK-NEXT: [[STRIDE:%.*]] = getelementptr inbounds float, ptr [[ALLOC]], <vscale x 4 x i32> [[IDX]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[LI:%.*]] = load <vscale x 4 x float>, ptr [[ALLOC]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[LI]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
%alloc = alloca <vscale x 4 x float>, align 16
%idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
%stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> splat (i1 true))
%li = load <vscale x 4 x float>, ptr %alloc, align 4
store <vscale x 4 x float> %li, ptr %out, align 4
ret void
Expand Down

0 comments on commit 900bea9

Please sign in to comment.