diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9b38eb6ab24df3..dcfff4552f11ea 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2603,6 +2603,52 @@ Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
   return RetVal;
 }
 
+static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
+                                        const DataLayout &DL) {
+  Value *Src = CI.getOperand(0);
+  PointerType *SrcPTy = cast<PointerType>(Src->getType());
+  PointerType *DstPTy = cast<PointerType>(CI.getType());
+  Type *DstElTy = DstPTy->getElementType();
+  Type *SrcElTy = SrcPTy->getElementType();
+
+  // When the type pointed to is not sized the cast cannot be
+  // turned into a gep.
+  if (!SrcElTy->isSized())
+    return nullptr;
+
+  // If the source and destination are pointers, and this cast is equivalent
+  // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
+  // This can enhance SROA and other transforms that want type-safe pointers.
+  unsigned NumZeros = 0;
+  while (SrcElTy && SrcElTy != DstElTy) {
+    SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
+    ++NumZeros;
+  }
+
+  // If we found a path from the src to dest, create the getelementptr now.
+  if (SrcElTy == DstElTy) {
+    SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
+    GetElementPtrInst *GEP =
+        GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
+
+    // If the source pointer is dereferenceable, then assume it points to an
+    // allocated object and apply "inbounds" to the GEP.
+    bool CanBeNull, CanBeFreed;
+    if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) {
+      // In a non-default address space (not 0), a null pointer can not be
+      // assumed inbounds, so ignore that case (dereferenceable_or_null).
+      // The reason is that 'null' is not treated differently in these address
+      // spaces, and we consequently ignore the 'gep inbounds' special case
+      // for 'null' which allows 'inbounds' on 'null' if the indices are
+      // zeros.
+      if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
+        GEP->setIsInBounds();
+    }
+    return GEP;
+  }
+  return nullptr;
+}
+
 Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
   // If the operands are integer typed then apply the integer transforms,
   // otherwise just apply the common ones.
@@ -2616,11 +2662,6 @@ Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
     return replaceInstUsesWith(CI, Src);
 
   if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
-    PointerType *SrcPTy = cast<PointerType>(SrcTy);
-    PointerType *DstPTy = cast<PointerType>(DestTy);
-    Type *DstElTy = DstPTy->getElementType();
-    Type *SrcElTy = SrcPTy->getElementType();
-
     // If we are casting a alloca to a pointer to a type of the same
     // size, rewrite the allocation instruction to allocate the "right" type.
     // There is no need to modify malloc calls because it is their bitcast that
@@ -2629,43 +2670,8 @@ Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
       if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
         return V;
 
-    // When the type pointed to is not sized the cast cannot be
-    // turned into a gep.
-    Type *PointeeType =
-        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
-    if (!PointeeType->isSized())
-      return nullptr;
-
-    // If the source and destination are pointers, and this cast is equivalent
-    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
-    // This can enhance SROA and other transforms that want type-safe pointers.
-    unsigned NumZeros = 0;
-    while (SrcElTy && SrcElTy != DstElTy) {
-      SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
-      ++NumZeros;
-    }
-
-    // If we found a path from the src to dest, create the getelementptr now.
-    if (SrcElTy == DstElTy) {
-      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
-      GetElementPtrInst *GEP =
-          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
-
-      // If the source pointer is dereferenceable, then assume it points to an
-      // allocated object and apply "inbounds" to the GEP.
-      bool CanBeNull, CanBeFreed;
-      if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) {
-        // In a non-default address space (not 0), a null pointer can not be
-        // assumed inbounds, so ignore that case (dereferenceable_or_null).
-        // The reason is that 'null' is not treated differently in these address
-        // spaces, and we consequently ignore the 'gep inbounds' special case
-        // for 'null' which allows 'inbounds' on 'null' if the indices are
-        // zeros.
-        if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
-          GEP->setIsInBounds();
-      }
-      return GEP;
-    }
+    if (Instruction *I = convertBitCastToGEP(CI, Builder, DL))
+      return I;
   }
 
   if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
index d06564dab985a2..ea524a6afcc222 100644
--- a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
@@ -12,12 +12,11 @@ define {i8*, i32} @f(i8* %buffer, i32* %array) {
 ; CHECK-NEXT:    store i32* [[ARRAY:%.*]], i32** [[ARRAY_SPILL_ADDR]], align 8
 ; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[ARRAY]], align 4
 ; CHECK-NEXT:    [[LOAD_POS:%.*]] = icmp sgt i32 [[LOAD]], 0
-; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[LOAD_POS]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
-; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[LOAD_POS]], i32 [[LOAD]], i32 0
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast void (i8*, i1)* [[TMP0]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = insertvalue { i8*, i32 } undef, i8* [[TMP2]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertvalue { i8*, i32 } [[TMP3]], i32 [[TMP1]], 1
-; CHECK-NEXT:    ret { i8*, i32 } [[TMP4]]
+; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[LOAD_POS]], i32 [[LOAD]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[LOAD_POS]], i8* bitcast (void (i8*, i1)* @f.resume.0 to i8*), i8* bitcast (void (i8*, i1)* @f.resume.1 to i8*)
+; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { i8*, i32 } undef, i8* [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertvalue { i8*, i32 } [[TMP2]], i32 [[TMP0]], 1
+; CHECK-NEXT:    ret { i8*, i32 } [[TMP3]]
 ;
 entry:
   %id = call token @llvm.coro.id.retcon.once(i32 8, i32 8, i8* %buffer, i8* bitcast (void (i8*, i1)* @prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
@@ -58,10 +57,10 @@ define void @test(i32* %array) {
 ; CHECK-NEXT:    store i32* [[ARRAY:%.*]], i32** [[TMP0]], align 8
 ; CHECK-NEXT:    [[LOAD_I:%.*]] = load i32, i32* [[ARRAY]], align 4
 ; CHECK-NEXT:    [[LOAD_POS_I:%.*]] = icmp sgt i32 [[LOAD_I]], 0
-; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[LOAD_POS_I]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
-; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[LOAD_POS_I]], i32 [[LOAD_I]], i32 0
-; CHECK-NEXT:    call void @print(i32 [[TMP2]])
-; CHECK-NEXT:    call void [[TMP1]](i8* nonnull [[DOTSUB]], i1 zeroext false)
+; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[LOAD_POS_I]], i32 [[LOAD_I]], i32 0
+; CHECK-NEXT:    call void @print(i32 [[TMP1]])
+; CHECK-NEXT:    [[CONT_CAST:%.*]] = select i1 [[LOAD_POS_I]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
+; CHECK-NEXT:    call void [[CONT_CAST]](i8* nonnull [[DOTSUB]], i1 zeroext false)
 ; CHECK-NEXT:    ret void
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/bitcast.ll b/llvm/test/Transforms/InstCombine/bitcast.ll
index 9c509d96913145..3699f7fa301c9b 100644
--- a/llvm/test/Transforms/InstCombine/bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/bitcast.ll
@@ -584,8 +584,7 @@ declare void @f1()
 declare void @f2()
 define i8* @select_bitcast_unsized_pointer(i1 %c) {
 ; CHECK-LABEL: @select_bitcast_unsized_pointer(
-; CHECK-NEXT:    [[S:%.*]] = select i1 [[C:%.*]], void ()* @f1, void ()* @f2
-; CHECK-NEXT:    [[B:%.*]] = bitcast void ()* [[S]] to i8*
+; CHECK-NEXT:    [[B:%.*]] = select i1 [[C:%.*]], i8* bitcast (void ()* @f1 to i8*), i8* bitcast (void ()* @f2 to i8*)
 ; CHECK-NEXT:    ret i8* [[B]]
 ;
   %s = select i1 %c, void ()* @f1, void ()* @f2
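
Note: as an illustration of the fold that convertBitCastToGEP performs, here is a minimal hand-written IR sketch. The type %outer, the function @first_scalar, and the dereferenceable(12) amount are invented for this example and are not part of the patch or its tests; the "after" shape is what the code above implies, not a captured tool output.

; Input: a bitcast from a struct pointer to a pointer to its first scalar field.
%outer = type { [2 x i32], i8 }

define i32* @first_scalar(%outer* dereferenceable(12) %p) {
  %q = bitcast %outer* %p to i32*
  ret i32* %q
}

; Expected shape after the fold: one zero index per type stepped through from
; %outer down to i32 (NumZeros), plus the leading zero, and "inbounds" because
; %p is dereferenceable in address space 0. Index widths may be canonicalized
; further by later folds.
;
;   %q = getelementptr inbounds %outer, %outer* %p, i32 0, i32 0, i32 0
;   ret i32* %q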