diff --git a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
index abcf9a841d5fc..e8124f72a1a81 100644
--- a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
+++ b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h
@@ -32,14 +32,19 @@ class InterestingMemoryOperand {
   Value *MaybeEVL;
   // The Stride Value, if we're looking at a strided load/store.
   Value *MaybeStride;
+  // The Offset Value, if we're looking at an indexed load/store. Note that
+  // the offset is a byte offset rather than an element index.
+  Value *MaybeByteOffset;
 
   InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
                            class Type *OpType, MaybeAlign Alignment,
                            Value *MaybeMask = nullptr,
                            Value *MaybeEVL = nullptr,
-                           Value *MaybeStride = nullptr)
+                           Value *MaybeStride = nullptr,
+                           Value *MaybeByteOffset = nullptr)
       : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
-        MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
+        MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride),
+        MaybeByteOffset(MaybeByteOffset) {
     const DataLayout &DL = I->getDataLayout();
     TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
     PtrUse = &I->getOperandUse(OperandNo);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index b324db407b23e..0e97c85e82da0 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2785,6 +2785,44 @@ bool RISCVTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                         Alignment, Mask, EVL, Stride);
     return true;
   }
+  case Intrinsic::riscv_vloxei_mask:
+  case Intrinsic::riscv_vluxei_mask:
+  case Intrinsic::riscv_vsoxei_mask:
+  case Intrinsic::riscv_vsuxei_mask:
+    HasMask = true;
+    [[fallthrough]];
+  case Intrinsic::riscv_vloxei:
+  case Intrinsic::riscv_vluxei:
+  case Intrinsic::riscv_vsoxei:
+  case Intrinsic::riscv_vsuxei: {
+    // Intrinsic interface (only the ordered variants are listed):
+    //   riscv_vloxei(merge, ptr, index, vl)
+    //   riscv_vloxei_mask(merge, ptr, index, mask, vl, policy)
+    //   riscv_vsoxei(val, ptr, index, vl)
+    //   riscv_vsoxei_mask(val, ptr, index, mask, vl, policy)
+    bool IsWrite = Inst->getType()->isVoidTy();
+    Type *Ty = IsWrite ? Inst->getArgOperand(0)->getType() : Inst->getType();
+    const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+    unsigned VLIndex = RVVIInfo->VLOperand;
+    unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+    Value *Mask;
+    if (HasMask) {
+      Mask = Inst->getArgOperand(VLIndex - 1);
+    } else {
+      // Mask cannot be nullptr here: a vector GEP produces a vector of
+      // pointers, and casting that to a scalar i64 would trigger a
+      // vector/scalar mismatch assertion in CreatePointerCast. Use an
+      // all-true mask instead, so ASan lowers the access via extractelement.
+      Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+      Mask = ConstantInt::getTrue(MaskType);
+    }
+    Value *EVL = Inst->getArgOperand(VLIndex);
+    Value *OffsetOp = Inst->getArgOperand(PtrOperandNo + 1);
+    Info.InterestingOperands.emplace_back(Inst, PtrOperandNo, IsWrite, Ty,
+                                          Align(1), Mask, EVL,
+                                          /* Stride */ nullptr, OffsetOp);
+    return true;
+  }
   }
   return false;
 }
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index a20e0dec8841b..cdae9a7271915 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1786,6 +1786,25 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
   else
     NumInstrumentedReads++;
 
+  if (O.MaybeByteOffset) {
+    Type *Ty = Type::getInt8Ty(*C);
+    IRBuilder<> IB(O.getInsn());
+
+    Value *OffsetOp = O.MaybeByteOffset;
+    if (TargetTriple.isRISCV()) {
+      Type *OffsetTy = OffsetOp->getType();
+      // RVV indexed loads/stores zero-extend offset operands that are
+      // narrower than XLEN to XLEN.
+      if (OffsetTy->getScalarType()->getIntegerBitWidth() <
+          static_cast<unsigned>(LongSize)) {
+        VectorType *OrigType = cast<VectorType>(OffsetTy);
+        Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
+        OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
+      }
+    }
+    Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
+  }
+
   unsigned Granularity = 1 << Mapping.Scale;
   if (O.MaybeMask) {
     instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
index f3312ce0c5bd2..919f16b103090 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
@@ -1256,7 +1256,31 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(
 ; CHECK-LABEL: @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT:    [[TMP4:%.*]] = zext <vscale x 1 x i16> [[TMP1:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], <vscale x 1 x i64> [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]]
+; CHECK:       11:
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <vscale x 1 x ptr> [[TMP5]], i64 [[IV]]
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT:    call void @__asan_loadN(i64 [[TMP13]], i64 4)
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64(<vscale x 1 x i32> poison, ptr [[TMP0]], <vscale x 1 x i16> [[TMP1]], i64 [[TMP2]])
 ; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
 ;
 entry:
@@ -1281,7 +1305,31 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
 ; CHECK-LABEL: @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32>
@llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1305,7 +1353,31 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16( @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1328,7 +1400,31 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; 
CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1353,7 +1449,31 @@ define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1377,7 +1497,31 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]]) +; CHECK-NEXT: br 
label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1401,7 +1545,31 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry: @@ -1425,7 +1593,31 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 
[[TMP9]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1449,7 +1641,31 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry:
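A note on the semantics the patch relies on (an illustrative sketch, not an excerpt from the patch or its test file; %base and %offsets are hypothetical names): the index operand of vloxei/vluxei/vsoxei/vsuxei holds byte offsets from the base pointer rather than element indices. That is why getTgtMemIntrinsic records it as MaybeByteOffset and why instrumentMop can form the per-lane addresses with a getelementptr over i8 after zero-extending the offsets to XLEN.

  ; Unordered indexed load of four i32 elements. With byte offsets
  ; 0, 4, 8 and 12, the access touches %base, %base+4, %base+8 and %base+12.
  %v = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64(
             <vscale x 1 x i32> poison, ptr %base, <vscale x 1 x i16> %offsets, i64 4)
  ;
  ; Shape of the addresses ASan derives before the per-lane checks:
  ;   %ext  = zext <vscale x 1 x i16> %offsets to <vscale x 1 x i64>
  ;   %ptrs = getelementptr i8, ptr %base, <vscale x 1 x i64> %ext
  ; Each active lane's pointer is then extracted, converted with ptrtoint,
  ; and passed to __asan_loadN with the element size (4 bytes here).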