Commit 1ef5699

[DAGCombiner] Support fold zero scalar vector.
This patch changes ISD::isBuildVectorAllZeros to ISD::isConstantSplatVectorAllZeros, which also handles zero splat vectors, the form that all-zeros constants take for scalable vector types.

Test Plan: check-llvm

Differential Revision: https://reviews.llvm.org/D100813
1 parent b22721f commit 1ef5699
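For context, a minimal sketch of the fold this enables (the function name and element type are illustrative, not taken from the patch): for a scalable vector type, an all-zeros operand becomes an ISD::SPLAT_VECTOR rather than an ISD::BUILD_VECTOR, so the old ISD::isBuildVectorAllZeros check never matched it and the redundant operation survived into the generated code.

define <vscale x 4 x i32> @add_zero_sketch(<vscale x 4 x i32> %a) {
  ; With this change the DAG combiner folds (add x, zero-splat) -> x,
  ; so the function lowers to a bare return of %a (compare the SVE tests below).
  %res = add <vscale x 4 x i32> %a, zeroinitializer
  ret <vscale x 4 x i32> %res
}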

File tree

7 files changed: +73 additions, -56 deletions


llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Lines changed: 16 additions & 16 deletions
@@ -2260,9 +2260,9 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
       return FoldedVOp;

     // fold (add x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }

@@ -2543,9 +2543,9 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
     // TODO SimplifyVBinOp

     // fold (add_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }

@@ -3249,7 +3249,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
       return FoldedVOp;

     // fold (sub x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }

@@ -3582,7 +3582,7 @@ SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
     // TODO SimplifyVBinOp

     // fold (sub_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }

@@ -4438,8 +4438,8 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
   if (VT.isVector()) {
     // fold (mulhs x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }

@@ -4486,8 +4486,8 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
   if (VT.isVector()) {
     // fold (mulhu x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }

@@ -5585,11 +5585,11 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
       return FoldedVOp;

     // fold (and x, 0) -> 0, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       // do not return N0, because undef node may exist in N0
       return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
                              SDLoc(N), N0.getValueType());
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       // do not return N1, because undef node may exist in N1
       return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
                              SDLoc(N), N1.getValueType());
@@ -6351,9 +6351,9 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
      return FoldedVOp;

    // fold (or x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
      return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
      return N0;

    // fold (or x, -1) -> -1, vector edition
@@ -7712,9 +7712,9 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
      return FoldedVOp;

    // fold (xor x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
      return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
      return N0;
   }

llvm/test/CodeGen/AArch64/sve-int-arith.ll

Lines changed: 32 additions & 0 deletions
@@ -37,6 +37,14 @@ define <vscale x 16 x i8> @add_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }

+define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = add <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sub_i64:
 ; CHECK:       // %bb.0:
@@ -73,6 +81,14 @@ define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }

+define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = sub <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: abs_nxv16i8:
 ; CHECK:       // %bb.0:
@@ -166,6 +182,14 @@ define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %res
 }

+define <vscale x 4 x i32> @sqadd_i32_zero(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %res
+}
+
 define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: sqadd_i16:
 ; CHECK:       // %bb.0:
@@ -194,6 +218,14 @@ define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
   ret <vscale x 2 x i64> %res
 }

+define <vscale x 2 x i64> @sqsub_i64_zero(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
+  ret <vscale x 2 x i64> %res
+}
+
 define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: sqsub_i32:
 ; CHECK:       // %bb.0:
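The new sqadd_i32_zero and sqsub_i64_zero tests call the saturating-arithmetic intrinsics by name; their declarations are presumably already present elsewhere in the test file (they are not part of this diff) and have the usual form:

declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)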

llvm/test/CodeGen/AArch64/sve-int-log.ll

Lines changed: 25 additions & 0 deletions
@@ -37,6 +37,15 @@ define <vscale x 16 x i8> @and_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }

+define <vscale x 16 x i8> @and_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: and_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, #0 // =0x0
+; CHECK-NEXT:    ret
+  %res = and <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @and_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: and_pred_d:
 ; CHECK:       // %bb.0:
@@ -113,6 +122,14 @@ define <vscale x 16 x i8> @or_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }

+define <vscale x 16 x i8> @or_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: or_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = or <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @or_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: or_pred_d:
 ; CHECK:       // %bb.0:
@@ -189,6 +206,14 @@ define <vscale x 16 x i8> @xor_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }

+define <vscale x 16 x i8> @xor_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: xor_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = xor <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @xor_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: xor_pred_d:
 ; CHECK:       // %bb.0:
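Note that and_b_zero above still needs one instruction while or_b_zero and xor_b_zero compile to a bare ret: its result is an all-zeros vector that must be materialized in z0, whereas the or/xor cases simply return %a, which already lives in z0. The (and x, 0) combine also builds a fresh zero constant instead of returning the zero operand, since an all-zeros node may contain undef lanes (see the "do not return N0/N1" comments in the DAGCombiner hunks above). A minimal IR sketch of the pattern, mirroring and_b_zero with an illustrative function name:

define <vscale x 16 x i8> @and_zero_sketch(<vscale x 16 x i8> %a) {
  ; Folds to a zero splat, which SVE materializes as "mov z0.b, #0".
  %res = and <vscale x 16 x i8> %a, zeroinitializer
  ret <vscale x 16 x i8> %res
}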

llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll

Lines changed: 0 additions & 10 deletions
@@ -341,7 +341,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -423,7 +421,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -464,7 +461,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -505,7 +501,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -546,7 +541,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -808,7 +802,6 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -866,7 +859,6 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -924,7 +916,6 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -982,7 +973,6 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0

llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll

Lines changed: 0 additions & 10 deletions
@@ -317,7 +317,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -358,7 +357,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -399,7 +397,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -440,7 +437,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -481,7 +477,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -522,7 +517,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -779,7 +773,6 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -827,7 +820,6 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -875,7 +867,6 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -923,7 +914,6 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
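The vadd.vi ..., 0 instructions removed from the two RVV files are adds of an all-zeros splat produced by the signed-division-by-constant expansion; with isConstantSplatVectorAllZeros the (add x, 0) vector fold now fires for scalable RVV types as well, so the instruction disappears. A minimal sketch of the kind of input that exercises this path (the divisor value and function name are illustrative, not taken from the test files):

define <vscale x 1 x i16> @sdiv_by_const_sketch(<vscale x 1 x i16> %va) {
  ; Signed division by a non-power-of-two constant is expanded into a
  ; multiply-high sequence; before this patch that sequence carried a
  ; redundant add of a zero splat (the removed vadd.vi lines above).
  %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %res = sdiv <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %res
}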
