diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index 32af1fa4f0e0a..c01d03f644724 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -125,7 +125,7 @@ static unsigned adjustForEndian(const DataLayout &DL, unsigned VectorWidth,
 //  br label %else
 //
 // else:                                             ; preds = %0, %cond.load
-//  %res.phi.else = phi <16 x i32> [ %5, %cond.load ], [ undef, %0 ]
+//  %res.phi.else = phi <16 x i32> [ %5, %cond.load ], [ poison, %0 ]
 //  %6 = extractelement <16 x i1> %mask, i32 1
 //  br i1 %6, label %cond.load1, label %else2
 //
@@ -386,11 +386,11 @@ static void scalarizeMaskedStore(const DataLayout &DL, CallInst *CI,
 // cond.load:
 //  %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
 //  %Load0 = load i32, i32* %Ptr0, align 4
-//  %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
+//  %Res0 = insertelement <16 x i32> poison, i32 %Load0, i32 0
 //  br label %else
 //
 // else:
-//  %res.phi.else = phi <16 x i32>[%Res0, %cond.load], [undef, %0]
+//  %res.phi.else = phi <16 x i32>[%Res0, %cond.load], [poison, %0]
 //  %Mask1 = extractelement <16 x i1> %Mask, i32 1
 //  br i1 %Mask1, label %cond.load1, label %else2
 //
@@ -645,7 +645,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   Value *VResult = PassThru;
 
   // Shorten the way if the mask is a vector of constants.
-  // Create a build_vector pattern, with loads/undefs as necessary and then
+  // Create a build_vector pattern, with loads/poisons as necessary and then
   // shuffle blend with the pass through value.
   if (isConstantIntVector(Mask)) {
     unsigned MemIndex = 0;
@@ -654,7 +654,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
       Value *InsertElt;
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue()) {
-        InsertElt = UndefValue::get(EltTy);
+        InsertElt = PoisonValue::get(EltTy);
         ShuffleMask[Idx] = Idx + VectorWidth;
       } else {
         Value *NewPtr =
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
index a71da27c306e9..11503611691c9 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
@@ -47,7 +47,7 @@ define <2 x i64> @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %passthru) {
 
 define <2 x i64> @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> undef, <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> poison, <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
@@ -58,7 +58,7 @@ define <2 x i64> @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
 ; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
 ; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[LOAD1]], i64 1
+; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> poison, i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[RES1]], <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
 ;
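
For reference, a minimal before/after sketch of the constant-mask fast path touched by the last hunk, reconstructed from the updated CHECK lines above; the function name @example and the value names are illustrative, not taken from the patch:

; Before the pass: an expandload whose constant mask <i1 false, i1 true>
; enables only lane 1, so only one element is read from memory.
define <2 x i64> @example(ptr %p, <2 x i64> %passthru) {
  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
  ret <2 x i64> %ret
}

; After ScalarizeMaskedMemIntrin: one scalar load for the single enabled lane,
; inserted into a poison base vector (previously undef), then blended with the
; pass-through value. Shuffle index 2 selects %passthru lane 0 for the disabled
; lane; index 1 selects the loaded lane.
define <2 x i64> @example(ptr %p, <2 x i64> %passthru) {
  %gep = getelementptr inbounds i64, ptr %p, i32 0
  %load = load i64, ptr %gep, align 1
  %vec = insertelement <2 x i64> poison, i64 %load, i64 1
  %res = shufflevector <2 x i64> %vec, <2 x i64> %passthru, <2 x i32> <i32 2, i32 1>
  ret <2 x i64> %res
}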