diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 805bbe40bd7c7..1036b8ae963a2 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -677,9 +677,9 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
     if (isModSet(AA->getModRefInfo(SI, LoadLoc)))
       UseMemMove = true;
 
-    uint64_t Size = DL.getTypeStoreSize(T);
-
     IRBuilder<> Builder(P);
+    Value *Size = Builder.CreateTypeSize(Builder.getInt64Ty(),
+                                         DL.getTypeStoreSize(T));
     Instruction *M;
     if (UseMemMove)
       M = Builder.CreateMemMove(
diff --git a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
index 84b06f6071ff6..42ad92ee03f4d 100644
--- a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
@@ -85,5 +85,43 @@ define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
   ret void
 }
 
+%0 = type { <vscale x 8 x i8> }
+%1 = type { <vscale x 8 x i8>, <vscale x 8 x i8> }
+
+define void @memmove_vector(ptr %a, ptr %b) {
+; CHECK-LABEL: @memmove_vector(
+; CHECK-NEXT:    [[V:%.*]] = load <vscale x 8 x i8>, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    store <vscale x 8 x i8> [[V]], ptr [[B:%.*]], align 1
+; CHECK-NEXT:    ret void
+;
+  %v = load <vscale x 8 x i8>, ptr %a, align 1
+  store <vscale x 8 x i8> %v, ptr %b, align 1
+  ret void
+}
+
+define void @memmove_agg1(ptr %a, ptr %b) {
+; CHECK-LABEL: @memmove_agg1(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[B:%.*]], ptr align 1 [[A:%.*]], i64 [[TMP2]], i1 false)
+; CHECK-NEXT:    ret void
+;
+  %v = load %0, ptr %a, align 1
+  store %0 %v, ptr %b, align 1
+  ret void
+}
+
+define void @memmove_agg2(ptr %a, ptr %b) {
+; CHECK-LABEL: @memmove_agg2(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 16
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[B:%.*]], ptr align 1 [[A:%.*]], i64 [[TMP2]], i1 false)
+; CHECK-NEXT:    ret void
+;
+  %v = load %1, ptr %a, align 1
+  store %1 %v, ptr %b, align 1
+  ret void
+}
+
 declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
 declare void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)