
Commit d9683a7

[RISCV] Fix extract_vector_elt on i1 at idx 0 being inverted
It looks like the intention here is to truncate an XLenVT -> i1, in which case we should be emitting snez instead of seqz, if I'm understanding correctly.

Reviewed By: jacquesguan, frasercrmck

Differential Revision: https://reviews.llvm.org/D149732
1 parent: eadf6db

11 files changed, +80 −80 lines changed
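The reasoning in the commit message can be checked with a small standalone model: for an XLenVT value that already holds a 0-or-1 boolean, truncation to i1 keeps the low bit, which is what an (x != 0) / snez check produces, while (x == 0) / seqz yields the logical inverse. This is only an illustrative sketch of that argument; the helper names below are made up and are not LLVM APIs.

#include <cassert>
#include <cstdint>

// Hypothetical helpers (not LLVM APIs) modeling the commit message's claim:
// truncating an XLenVT boolean (0 or 1) to i1 keeps the low bit, which is
// exactly what an (x != 0) / snez check computes; (x == 0) / seqz inverts it.
static bool truncToI1(uint64_t X) { return X & 1; } // XLenVT -> i1
static bool seqz(uint64_t X) { return X == 0; }     // old lowering: ISD::SETEQ
static bool snez(uint64_t X) { return X != 0; }     // new lowering: ISD::SETNE

int main() {
  for (uint64_t B : {uint64_t{0}, uint64_t{1}}) {
    assert(snez(B) == truncToI1(B));  // SETNE matches the truncation
    assert(seqz(B) == !truncToI1(B)); // SETEQ produces the inverted value
  }
  return 0;
}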

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 1 addition & 1 deletion
@@ -6165,7 +6165,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
      SDValue Vfirst =
          DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
      return DAG.getSetCC(DL, XLenVT, Vfirst, DAG.getConstant(0, DL, XLenVT),
-                         ISD::SETEQ);
+                         ISD::SETNE);
    }
    if (VecVT.isFixedLengthVector()) {
      unsigned NumElts = VecVT.getVectorNumElements();
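The flipped condition code above is what surfaces as a different scalar instruction in the test updates below: as the checked lines show, a compare of a register against zero with SETEQ is emitted as the seqz pseudo-instruction and with SETNE as snez. The sketch below only sanity-checks the documented RISC-V pseudo-instruction expansions (seqz rd, rs == sltiu rd, rs, 1; snez rd, rs == sltu rd, x0, rs); it is illustrative, not part of the patch.

#include <cassert>
#include <cstdint>

// Model of the documented RISC-V pseudo-instruction expansions:
//   seqz rd, rs  ==  sltiu rd, rs, 1   (set if rs == 0)
//   snez rd, rs  ==  sltu  rd, x0, rs  (set if rs != 0)
static uint64_t seqz(uint64_t Rs) { return Rs < 1 ? 1 : 0; } // sltiu rd, rs, 1
static uint64_t snez(uint64_t Rs) { return 0 < Rs ? 1 : 0; } // sltu rd, x0, rs

int main() {
  for (uint64_t X : {uint64_t{0}, uint64_t{1}, uint64_t{2}, ~uint64_t{0}}) {
    assert(seqz(X) == uint64_t(X == 0));
    assert(snez(X) == uint64_t(X != 0));
  }
  return 0;
}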

llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll

Lines changed: 2 additions & 2 deletions
@@ -29,7 +29,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vfirst.m a1, v10
-; RV32-NEXT: seqz a1, a1
+; RV32-NEXT: snez a1, a1
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.x v8, a1
; RV32-NEXT: vmsne.vi v0, v8, 0
@@ -51,7 +51,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vfirst.m a1, v12
-; RV64-NEXT: seqz a1, a1
+; RV64-NEXT: snez a1, a1
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.x v8, a1
; RV64-NEXT: vmsne.vi v0, v8, 0

llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll

Lines changed: 7 additions & 7 deletions
@@ -217,7 +217,7 @@ define i1 @extractelt_nxv1i1_idx0(<vscale x 1 x i8>* %x) nounwind {
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
%b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
@@ -232,7 +232,7 @@ define i1 @extractelt_nxv2i1_idx0(<vscale x 2 x i8>* %x) nounwind {
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
%b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
@@ -247,7 +247,7 @@ define i1 @extractelt_nxv4i1_idx0(<vscale x 4 x i8>* %x) nounwind {
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
%b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
@@ -262,7 +262,7 @@ define i1 @extractelt_nxv8i1_idx0(<vscale x 8 x i8>* %x) nounwind {
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
%b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
@@ -277,7 +277,7 @@ define i1 @extractelt_nxv16i1_idx0(<vscale x 16 x i8>* %x) nounwind {
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmseq.vi v10, v8, 0
; CHECK-NEXT: vfirst.m a0, v10
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
%b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
@@ -292,7 +292,7 @@ define i1 @extractelt_nxv32i1_idx0(<vscale x 32 x i8>* %x) nounwind {
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vi v12, v8, 0
; CHECK-NEXT: vfirst.m a0, v12
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
%b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
@@ -307,7 +307,7 @@ define i1 @extractelt_nxv64i1_idx0(<vscale x 64 x i8>* %x) nounwind {
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v16, v8, 0
; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
%b = icmp eq <vscale x 64 x i8> %a, zeroinitializer

llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll

Lines changed: 26 additions & 26 deletions
@@ -20,7 +20,7 @@ define <2 x i1> @reverse_v2i1(<2 x i1> %a) {
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -45,7 +45,7 @@ define <4 x i1> @reverse_v4i1(<4 x i1> %a) {
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: vslide1down.vx v8, v9, a0
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -82,7 +82,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -115,7 +115,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV32-BITS-256-NEXT: srli a0, a0, 31
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -148,7 +148,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV32-BITS-512-NEXT: srli a0, a0, 31
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -181,7 +181,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -214,7 +214,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV64-BITS-256-NEXT: srli a0, a0, 63
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -247,7 +247,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; RV64-BITS-512-NEXT: srli a0, a0, 63
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -308,7 +308,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -365,7 +365,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV32-BITS-256-NEXT: srli a0, a0, 31
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -422,7 +422,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV32-BITS-512-NEXT: srli a0, a0, 31
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -479,7 +479,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -536,7 +536,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV64-BITS-256-NEXT: srli a0, a0, 63
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -593,7 +593,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; RV64-BITS-512-NEXT: srli a0, a0, 63
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -702,7 +702,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -807,7 +807,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV32-BITS-256-NEXT: srli a0, a0, 31
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -912,7 +912,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV32-BITS-512-NEXT: srli a0, a0, 31
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -1017,7 +1017,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -1122,7 +1122,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV64-BITS-256-NEXT: srli a0, a0, 63
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -1227,7 +1227,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; RV64-BITS-512-NEXT: srli a0, a0, 63
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -1434,7 +1434,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: srli a1, a1, 31
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a1
; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -1637,7 +1637,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV32-BITS-256-NEXT: srli a1, a1, 31
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a1
; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -1840,7 +1840,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV32-BITS-512-NEXT: srli a1, a1, 31
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a1
; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -2040,7 +2040,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -2240,7 +2240,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV64-BITS-256-NEXT: srli a0, a0, 63
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -2440,7 +2440,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; RV64-BITS-512-NEXT: srli a0, a0, 63
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
