diff --git a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
index 26d17e7b23aaf..3dd8a361a822c 100644
--- a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
+++ b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
@@ -25,12 +25,6 @@
 // CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_500:.*]] = {{.*}}, i32 500, i32 15 } }
 // CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_700:.*]] = {{.*}}, i32 700, i32 15 } }
 // CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_800:.*]] = {{.*}}, i32 800, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_900:.*]] = {{.*}}, i32 900, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1100:.*]] = {{.*}}, i32 1100, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1200:.*]] = {{.*}}, i32 1200, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1300:.*]] = {{.*}}, i32 1300, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1500:.*]] = {{.*}}, i32 1500, i32 15 } }
-// CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1600:.*]] = {{.*}}, i32 1600, i32 15 } }
 // CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1700:.*]] = {{.*}}, i32 1700, i32 15 } }
 // CHECK-SANITIZE-ANYRECOVER-DAG: @[[LINE_1800:.*]] = {{.*}}, i32 1800, i32 20 } }
@@ -225,172 +219,6 @@ char *nullptr_allones_BAD(void) {
 //------------------------------------------------------------------------------
-char *one_var(unsigned long offset) {
- // CHECK: define{{.*}} ptr @one_var(i64 noundef %[[OFFSET:.*]])
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8
- // CHECK-NEXT: store i64 %[[OFFSET]], ptr %[[OFFSET_ADDR]], align 8
- // CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, ptr %[[OFFSET_ADDR]], align 8
- // CHECK-NEXT: %[[ADD_PTR:.*]] = getelementptr inbounds nuw i8, ptr inttoptr (i64 1 to ptr), i64 %[[OFFSET_RELOADED]]
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP:.*]] = add i64 1, %[[COMPUTED_OFFSET]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[OTHER_IS_NOT_NULL:.*]] = icmp ne ptr inttoptr (i64 1 to ptr), null
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[OTHER_IS_NOT_NULL]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], 1, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[GEP_IS_OKAY]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_900]], i64 1, i64 %[[COMPUTED_GEP]])
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_900]], i64 1, i64 %[[COMPUTED_GEP]])
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr %[[ADD_PTR]]
- static char *const base = (char *)1;
-#line 900
- return base + offset;
-}
-
-char *one_zero(void) {
- // CHECK: define{{.*}} ptr @one_zero()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-NEXT: ret ptr inttoptr (i64 1 to ptr)
- static char *const base = (char *)1;
- static const unsigned long offset = 0;
-#line 1000
- return base + offset;
-}
-
-char *one_one_OK(void) {
- // CHECK: define{{.*}} ptr @one_one_OK()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-SANITIZE-NEXT: %[[CMP1:.*]] = icmp ne ptr inttoptr (i64 1 to ptr), null, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[CMP2:.*]] = icmp ne i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 1) to i64), i64 1), i64 1), 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COND:.*]] = icmp eq i1 %[[CMP1]], %[[CMP2]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[COND]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_1100]], i64 1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 1) to i64), i64 1), i64 1))
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_1100]], i64 1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 1) to i64), i64 1), i64 1))
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 1)
- static char *const base = (char *)1;
- static const unsigned long offset = 1;
-#line 1100
- return base + offset;
-}
-
-char *one_allones_BAD(void) {
- // CHECK: define{{.*}} ptr @one_allones_BAD()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-SANITIZE-NEXT: %[[CMP1:.*]] = icmp ne ptr inttoptr (i64 1 to ptr), null, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[CMP2:.*]] = icmp ne i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 -1) to i64), i64 1), i64 1), 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COND:.*]] = icmp eq i1 %[[CMP1]], %[[CMP2]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[COND]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_1200]], i64 1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 -1) to i64), i64 1), i64 1))
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_1200]], i64 1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 -1) to i64), i64 1), i64 1))
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 1 to ptr), i64 -1)
- static char *const base = (char *)1;
- static const unsigned long offset = -1;
-#line 1200
- return base + offset;
-}
-
-//------------------------------------------------------------------------------
-
-char *allones_var(unsigned long offset) {
- // CHECK: define{{.*}} ptr @allones_var(i64 noundef %[[OFFSET:.*]])
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8
- // CHECK-NEXT: store i64 %[[OFFSET]], ptr %[[OFFSET_ADDR]], align 8
- // CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, ptr %[[OFFSET_ADDR]], align 8
- // CHECK-NEXT: %[[ADD_PTR:.*]] = getelementptr inbounds nuw i8, ptr inttoptr (i64 -1 to ptr), i64 %[[OFFSET_RELOADED]]
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP:.*]] = add i64 -1, %[[COMPUTED_OFFSET]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[OTHER_IS_NOT_NULL:.*]] = icmp ne ptr inttoptr (i64 -1 to ptr), null, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[OTHER_IS_NOT_NULL]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], -1, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
- // CHECK-SANITIZE-NEXT: %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[GEP_IS_OKAY]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_1300]], i64 -1, i64 %[[COMPUTED_GEP]])
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_1300]], i64 -1, i64 %[[COMPUTED_GEP]])
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr %[[ADD_PTR]]
- static char *const base = (char *)-1;
-#line 1300
- return base + offset;
-}
-
-char *allones_zero_OK(void) {
- // CHECK: define{{.*}} ptr @allones_zero_OK()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-NEXT: ret ptr inttoptr (i64 -1 to ptr)
- static char *const base = (char *)-1;
- static const unsigned long offset = 0;
-#line 1400
- return base + offset;
-}
-
-char *allones_one_BAD(void) {
- // CHECK: define{{.*}} ptr @allones_one_BAD()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-SANITIZE-NEXT: %[[CMP1:.*]] = icmp ne ptr inttoptr (i64 -1 to ptr), null, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[CMP2:.*]] = icmp ne i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 1) to i64), i64 -1), i64 -1), 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COND:.*]] = icmp eq i1 %[[CMP1]], %[[CMP2]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[COND]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_1500]], i64 -1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 1) to i64), i64 -1), i64 -1))
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_1500]], i64 -1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 1) to i64), i64 -1), i64 -1))
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 1)
- static char *const base = (char *)-1;
- static const unsigned long offset = 1;
-#line 1500
- return base + offset;
-}
-
-char *allones_allones_OK(void) {
- // CHECK: define{{.*}} ptr @allones_allones_OK()
- // CHECK-NEXT: [[ENTRY:.*]]:
- // CHECK-SANITIZE-NEXT: %[[CMP1:.*]] = icmp ne ptr inttoptr (i64 -1 to ptr), null, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[CMP2:.*]] = icmp ne i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 -1) to i64), i64 -1), i64 -1), 0, !nosanitize
- // CHECK-SANITIZE-NEXT: %[[COND:.*]] = icmp eq i1 %[[CMP1]], %[[CMP2]], !nosanitize
- // CHECK-SANITIZE-NEXT: br i1 %[[COND]], label %[[CONT:.*]], label %[[HANDLER_POINTER_OVERFLOW:[^,]+]],{{.*}} !nosanitize
- // CHECK-SANITIZE: [[HANDLER_POINTER_OVERFLOW]]:
- // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_pointer_overflow_abort(ptr @[[LINE_1600]], i64 -1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 -1) to i64), i64 -1), i64 -1))
- // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_pointer_overflow(ptr @[[LINE_1600]], i64 -1, i64 add (i64 sub (i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 -1) to i64), i64 -1), i64 -1))
- // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.ubsantrap(i8 19){{.*}}, !nosanitize
- // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
- // CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: ret ptr getelementptr inbounds nuw (i8, ptr inttoptr (i64 -1 to ptr), i64 -1)
- static char *const base = (char *)-1;
- static const unsigned long offset = -1;
-#line 1600
- return base + offset;
-}
-
 // C++ does not allow void* arithmetic even as a GNU extension. Replace void*
 // with char* in that case to keep test expectations the same.
 #ifdef __cplusplus
diff --git a/clang/test/CodeGenCXX/builtin-invoke.cpp b/clang/test/CodeGenCXX/builtin-invoke.cpp
index af66dfd4dae30..0f84f83e18add 100644
--- a/clang/test/CodeGenCXX/builtin-invoke.cpp
+++ b/clang/test/CodeGenCXX/builtin-invoke.cpp
@@ -55,7 +55,7 @@ extern "C" void call_memptr(std::reference_wrapper<Callable> wrapper) {
 // CHECK-NEXT: br label %memptr.end
 // CHECK-EMPTY:
 // CHECK-NEXT: memptr.end:
- // CHECK-NEXT: %2 = phi ptr [ %memptr.virtualfn, %memptr.virtual ], [ @_ZN8Callable4funcEv, %memptr.nonvirtual ]
+ // CHECK-NEXT: %2 = phi ptr [ %memptr.virtualfn, %memptr.virtual ], [ inttoptr (i64 ptrtoint (ptr @_ZN8Callable4funcEv to i64) to ptr), %memptr.nonvirtual ]
 // CHECK-NEXT: call void %2(ptr noundef nonnull align 1 dereferenceable(1) %0)
 // CHECK-NEXT: ret void
 }
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 14685abef01e9..9f56779a9b935 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -601,11 +601,9 @@ class CastInst : public UnaryInstruction {
     Instruction::CastOps firstOpcode, ///< Opcode of first cast
     Instruction::CastOps secondOpcode, ///< Opcode of second cast
     Type *SrcTy, ///< SrcTy of 1st cast
-    Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
-    Type *DstTy, ///< DstTy of 2nd cast
-    Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
-    Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
-    Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
+    Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
+    Type *DstTy, ///< DstTy of 2nd cast
+    const DataLayout *DL ///< Optional data layout
   );

   /// Return the opcode of this CastInst
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index d52b073854630..b744537aeb474 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1482,6 +1482,15 @@ Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                         Type *DestTy, const DataLayout &DL) {
   assert(Instruction::isCast(Opcode));
+
+  if (auto *CE = dyn_cast<ConstantExpr>(C))
+    if (CE->isCast())
+      if (unsigned NewOp = CastInst::isEliminableCastPair(
+              Instruction::CastOps(CE->getOpcode()),
+              Instruction::CastOps(Opcode), CE->getOperand(0)->getType(),
+              C->getType(), DestTy, &DL))
+        return ConstantFoldCastOperand(NewOp, CE->getOperand(0), DestTy, DL);
+
   switch (Opcode) {
   default:
     llvm_unreachable("Missing case");
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 0d978d4da125e..ca6c321ff2662 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5425,15 +5425,8 @@ static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
     if (Src->getType() == Ty) {
       auto FirstOp = CI->getOpcode();
       auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
-      Type *SrcIntPtrTy =
-          SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
-      Type *MidIntPtrTy =
-          MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
-      Type *DstIntPtrTy =
-          DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
       if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
-                                         SrcIntPtrTy, MidIntPtrTy,
-                                         DstIntPtrTy) == Instruction::BitCast)
+                                         &Q.DL) == Instruction::BitCast)
         return Src;
     }
   }
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 6b202baf8ccef..3842b1afab05d 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -55,15 +55,8 @@ foldConstantCastPair(
   Type *MidTy = Op->getType();
   Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
   Instruction::CastOps secondOp = Instruction::CastOps(opc);
-
-  // Assume that pointers are never more than 64 bits wide, and only use this
-  // for the middle type. Otherwise we could end up folding away illegal
-  // bitcasts between address spaces with different sizes.
-  IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
-
-  // Let CastInst::isEliminableCastPair do the heavy lifting.
   return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
-                                        nullptr, FakeIntPtrTy, nullptr);
+                                        /*DL=*/nullptr);
 }

 static Constant *FoldBitCast(Constant *V, Type *DestTy) {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 941e41f3127d5..88e7c44a8b885 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2824,10 +2824,10 @@ bool CastInst::isNoopCast(const DataLayout &DL) const {
 /// The function returns a resultOpcode so these two casts can be replaced with:
 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
 /// If no such cast is permitted, the function returns 0.
-unsigned CastInst::isEliminableCastPair(
-    Instruction::CastOps firstOp, Instruction::CastOps secondOp,
-    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
-    Type *DstIntPtrTy) {
+unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
+                                        Instruction::CastOps secondOp,
+                                        Type *SrcTy, Type *MidTy, Type *DstTy,
+                                        const DataLayout *DL) {
   // Define the 144 possibilities for these two cast instructions. The values
   // in this matrix determine what to do in a given situation and select the
   // case in the switch below. The rows correspond to firstOp, the columns
@@ -2936,24 +2936,16 @@ unsigned CastInst::isEliminableCastPair(
       return 0;
     // Cannot simplify if address spaces are different!
-    if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
+    if (SrcTy != DstTy)
       return 0;
-    unsigned MidSize = MidTy->getScalarSizeInBits();
-    // We can still fold this without knowing the actual sizes as long we
-    // know that the intermediate pointer is the largest possible
+    // Cannot simplify if the intermediate integer size is smaller than the
     // pointer size.
-    // FIXME: Is this always true?
-    if (MidSize == 64)
-      return Instruction::BitCast;
-
-    // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
-    if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
+    unsigned MidSize = MidTy->getScalarSizeInBits();
+    if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
       return 0;
-    unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
-    if (MidSize >= PtrSize)
-      return Instruction::BitCast;
-    return 0;
+
+    return Instruction::BitCast;
   }
   case 8: {
     // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
@@ -2973,14 +2965,17 @@
     // zext, sext -> zext, because sext can't sign extend after zext
     return Instruction::ZExt;
   case 11: {
-    // inttoptr, ptrtoint/ptrtoaddr -> bitcast if SrcSize<=PtrSize and
-    // SrcSize==DstSize
-    if (!MidIntPtrTy)
+    // inttoptr, ptrtoint/ptrtoaddr -> bitcast if SrcSize<=PtrSize/AddrSize
+    // and SrcSize==DstSize
+    if (!DL)
       return 0;
-    unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
+    unsigned MidSize = secondOp == Instruction::PtrToAddr
+                           ? DL->getAddressSizeInBits(MidTy)
+                           : DL->getPointerTypeSizeInBits(MidTy);
     unsigned SrcSize = SrcTy->getScalarSizeInBits();
     unsigned DstSize = DstTy->getScalarSizeInBits();
-    if (SrcSize <= PtrSize && SrcSize == DstSize)
+    // TODO: Could also produce zext or trunc here.
+    if (SrcSize <= MidSize && SrcSize == DstSize)
       return Instruction::BitCast;
     return 0;
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9ca8194b44f8f..56194fef572d2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -137,13 +137,10 @@ InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
   Instruction::CastOps secondOp = CI2->getOpcode();
   Type *SrcIntPtrTy =
       SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
-  Type *MidIntPtrTy =
-      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
   Type *DstIntPtrTy =
       DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
   unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
-                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
-                                                DstIntPtrTy);
+                                                DstTy, &DL);

   // We don't want to form an inttoptr or ptrtoint that converts to an integer
   // type that differs from the pointer size.
diff --git a/llvm/test/Assembler/ConstantExprFold.ll b/llvm/test/Assembler/ConstantExprFold.ll
index 840ed06d42228..33ee49296de0a 100644
--- a/llvm/test/Assembler/ConstantExprFold.ll
+++ b/llvm/test/Assembler/ConstantExprFold.ll
@@ -30,9 +30,9 @@
 ; Need a function to make update_test_checks.py work.
 ;.
 ; CHECK: @A = global i64 0
-; CHECK: @add = global ptr @A
-; CHECK: @sub = global ptr @A
-; CHECK: @xor = global ptr @A
+; CHECK: @add = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr)
+; CHECK: @sub = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr)
+; CHECK: @xor = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr)
 ; CHECK: @B = external global %Ty
 ; CHECK: @cons = weak global i32 0, align 8
 ; CHECK: @gep1 = global <2 x ptr> undef
diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
index 26728a74d032c..70c8fe6db50a0 100644
--- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
+++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll
@@ -44,16 +44,6 @@ entry:
   ret void
 }

-define void @stores_ptrtoint_constexpr() {
-; CHECK-LABEL: @stores_ptrtoint_constexpr(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: ret void
-;
-entry:
-  store i32 0, ptr inttoptr (i64 ptrtoint (ptr @global.20ptr to i64) to ptr), align 8
-  ret void
-}
-
 @gv = internal unnamed_addr global [3 x ptr] zeroinitializer, align 16
 @gv2 = internal unnamed_addr global i32 0, align 4
diff --git a/llvm/test/Transforms/InstSimplify/ptrtoint.ll b/llvm/test/Transforms/InstSimplify/ptrtoint.ll
index 68af4d7d974db..3b0e052a5638f 100644
--- a/llvm/test/Transforms/InstSimplify/ptrtoint.ll
+++ b/llvm/test/Transforms/InstSimplify/ptrtoint.ll
@@ -139,11 +139,12 @@ define i128 @ptrtoint_gep_sub_wide_type(ptr %ptr, i128 %end.addr) {
   ret i128 %end.addr2
 }

-; FIXME: This is a miscompile.
 define ptr addrspace(1) @inttoptr_of_ptrtoint_wide(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define ptr addrspace(1) @inttoptr_of_ptrtoint_wide(
 ; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT: ret ptr addrspace(1) [[PTR]]
+; CHECK-NEXT: [[INT:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTR2:%.*]] = inttoptr i64 [[INT]] to ptr addrspace(1)
+; CHECK-NEXT: ret ptr addrspace(1) [[PTR2]]
 ;
   %int = ptrtoint ptr addrspace(1) %ptr to i64
   %ptr2 = inttoptr i64 %int to ptr addrspace(1)
diff --git a/llvm/test/Transforms/LowerTypeTests/simple.ll b/llvm/test/Transforms/LowerTypeTests/simple.ll
index 6fb8f6f900e07..173a6aef8f1de 100644
--- a/llvm/test/Transforms/LowerTypeTests/simple.ll
+++ b/llvm/test/Transforms/LowerTypeTests/simple.ll
@@ -56,7 +56,7 @@ define i1 @foo(ptr %p) {
   ; CHECK: [[R8:%[^ ]*]] = getelementptr i8, ptr @bits_use.{{[0-9]*}}, i32 [[R5]]
   ; CHECK: [[R9:%[^ ]*]] = load i8, ptr [[R8]]
-  ; CHECK: [[R10:%[^ ]*]] = and i8 [[R9]], 1
+  ; CHECK: [[R10:%[^ ]*]] = and i8 [[R9]], ptrtoint (ptr inttoptr (i8 1 to ptr) to i8)
   ; CHECK: [[R11:%[^ ]*]] = icmp ne i8 [[R10]], 0

   ; CHECK: [[R16:%[^ ]*]] = phi i1 [ false, {{%[^ ]*}} ], [ [[R11]], {{%[^ ]*}} ]
@@ -91,7 +91,7 @@ define i1 @baz(ptr %p) {
   ; CHECK: [[T8:%[^ ]*]] = getelementptr i8, ptr @bits_use{{(\.[0-9]*)?}}, i32 [[T5]]
   ; CHECK: [[T9:%[^ ]*]] = load i8, ptr [[T8]]
-  ; CHECK: [[T10:%[^ ]*]] = and i8 [[T9]], 2
+  ; CHECK: [[T10:%[^ ]*]] = and i8 [[T9]], ptrtoint (ptr inttoptr (i8 2 to ptr) to i8)
   ; CHECK: [[T11:%[^ ]*]] = icmp ne i8 [[T10]], 0

   ; CHECK: [[T16:%[^ ]*]] = phi i1 [ false, {{%[^ ]*}} ], [ [[T11]], {{%[^ ]*}} ]
diff --git a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll
index 31d816cfcebd7..bf4a366f5c1f2 100644
--- a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll
+++ b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll
@@ -8,10 +8,12 @@ define void @and_constexpr(i32 %a) {
 ; CHECK-LABEL: @and_constexpr(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: call void @use.i32(i32 0)
-; CHECK-NEXT: [[AND_2:%.*]] = and i32 20, [[A:%.*]]
+; CHECK-NEXT: [[AND_2:%.*]] = and i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]]
 ; CHECK-NEXT: call void @use.i32(i32 [[AND_2]])
-; CHECK-NEXT: call void @use.i1(i1 true)
-; CHECK-NEXT: call void @use.i1(i1 false)
+; CHECK-NEXT: [[TRUE_1:%.*]] = icmp ne i32 [[AND_2]], 100
+; CHECK-NEXT: call void @use.i1(i1 [[TRUE_1]])
+; CHECK-NEXT: [[FALSE_1:%.*]] = icmp eq i32 [[AND_2]], 100
+; CHECK-NEXT: call void @use.i1(i1 [[FALSE_1]])
 ; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i32 [[AND_2]], 10
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]])
 ; CHECK-NEXT: call void @use.i32(i32 4)
@@ -38,7+40,7 @@ define void @add_constexpr(i32 %a) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 0, [[A:%.*]]
 ; CHECK-NEXT: call void @use.i32(i32 [[ADD_1]])
-; CHECK-NEXT: [[ADD_2:%.*]] = add i32 20, [[A]]
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A]]
 ; CHECK-NEXT: call void @use.i32(i32 [[ADD_2]])
 ; CHECK-NEXT: [[COND_1:%.*]] = icmp ne i32 [[ADD_2]], 100
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]])
@@ -46,7+48,7 @@ define void @add_constexpr(i32 %a) {
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_2]])
 ; CHECK-NEXT: [[COND_3:%.*]] = icmp eq i32 [[ADD_2]], 10
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_3]])
-; CHECK-NEXT: call void @use.i32(i32 120)
+; CHECK-NEXT: call void @use.i32(i32 add (i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), i32 ptrtoint (ptr inttoptr (i32 100 to ptr) to i32)))
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -69,7 +71,7 @@ define void @mul_constexpr(i32 %a) {
 ; CHECK-LABEL: @mul_constexpr(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: call void @use.i32(i32 0)
-; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 20, [[A:%.*]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]]
 ; CHECK-NEXT: call void @use.i32(i32 [[MUL_2]])
 ; CHECK-NEXT: [[COND_1:%.*]] = icmp ne i32 [[MUL_2]], 100
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]])
@@ -77,7 +79,8 @@ define void @mul_constexpr(i32 %a) {
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_2]])
 ; CHECK-NEXT: [[COND_3:%.*]] = icmp eq i32 [[MUL_2]], 10
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_3]])
-; CHECK-NEXT: call void @use.i32(i32 2000)
+; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), ptrtoint (ptr inttoptr (i32 100 to ptr) to i32)
+; CHECK-NEXT: call void @use.i32(i32 [[MUL_3]])
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -100,13 +103,16 @@ define void @udiv_constexpr(i32 %a) {
 ; CHECK-LABEL: @udiv_constexpr(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: call void @use.i32(i32 0)
-; CHECK-NEXT: [[UDIV_2:%.*]] = udiv i32 20, [[A:%.*]]
+; CHECK-NEXT: [[UDIV_2:%.*]] = udiv i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]]
 ; CHECK-NEXT: call void @use.i32(i32 [[UDIV_2]])
-; CHECK-NEXT: call void @use.i1(i1 true)
-; CHECK-NEXT: call void @use.i1(i1 false)
+; CHECK-NEXT: [[TRUE_1:%.*]] = icmp ne i32 [[UDIV_2]], 100
+; CHECK-NEXT: call void @use.i1(i1 [[TRUE_1]])
+; CHECK-NEXT: [[FALSE_1:%.*]] = icmp eq i32 [[UDIV_2]], 50
+; CHECK-NEXT: call void @use.i1(i1 [[FALSE_1]])
 ; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i32 [[UDIV_2]], 10
 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]])
-; CHECK-NEXT: call void @use.i32(i32 0)
+; CHECK-NEXT: [[UDIV_3:%.*]] = udiv i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), ptrtoint (ptr inttoptr (i32 100 to ptr) to i32)
+; CHECK-NEXT: call void @use.i32(i32 [[UDIV_3]])
 ; CHECK-NEXT: ret void
 ;
 entry:
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index 21d45960dce7c..fe9e7e8228490 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -606,82 +606,63 @@ TEST(InstructionTest, ConstrainedTrans) {
 TEST(InstructionsTest, isEliminableCastPair) {
   LLVMContext C;
+  DataLayout DL1("p1:32:32");

-  Type* Int16Ty = Type::getInt16Ty(C);
-  Type* Int32Ty = Type::getInt32Ty(C);
-  Type* Int64Ty = Type::getInt64Ty(C);
-  Type *Int64PtrTy = PointerType::get(C, 0);
+  Type *Int16Ty = Type::getInt16Ty(C);
+  Type *Int64Ty = Type::getInt64Ty(C);
+  Type *PtrTy64 = PointerType::get(C, 0);
+  Type *PtrTy32 = PointerType::get(C, 1);

   // Source and destination pointers have same size -> bitcast.
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
-                                           CastInst::IntToPtr,
-                                           Int64PtrTy, Int64Ty, Int64PtrTy,
-                                           Int32Ty, nullptr, Int32Ty),
-            CastInst::BitCast);
-
-  // Source and destination have unknown sizes, but the same address space and
-  // the intermediate int is the maximum pointer size -> bitcast
-  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
-                                           CastInst::IntToPtr,
-                                           Int64PtrTy, Int64Ty, Int64PtrTy,
-                                           nullptr, nullptr, nullptr),
+                                           CastInst::IntToPtr, PtrTy32, Int64Ty,
+                                           PtrTy32, &DL1),
            CastInst::BitCast);

-  // Source and destination have unknown sizes, but the same address space and
-  // the intermediate int is not the maximum pointer size -> nothing
+  // Source and destination have unknown sizes.
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt,
-                                           CastInst::IntToPtr,
-                                           Int64PtrTy, Int32Ty, Int64PtrTy,
-                                           nullptr, nullptr, nullptr),
+                                           CastInst::IntToPtr, PtrTy32, Int64Ty,
+                                           PtrTy32, nullptr),
            0U);

   // Middle pointer big enough -> bitcast.
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
-                                           CastInst::PtrToInt,
-                                           Int64Ty, Int64PtrTy, Int64Ty,
-                                           nullptr, Int64Ty, nullptr),
+                                           CastInst::PtrToInt, Int64Ty, PtrTy64,
+                                           Int64Ty, &DL1),
            CastInst::BitCast);

   // Middle pointer too small -> fail.
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
-                                           CastInst::PtrToInt,
-                                           Int64Ty, Int64PtrTy, Int64Ty,
-                                           nullptr, Int32Ty, nullptr),
+                                           CastInst::PtrToInt, Int64Ty, PtrTy32,
+                                           Int64Ty, &DL1),
            0U);

   // Test that we don't eliminate bitcasts between different address spaces,
   // or if we don't have available pointer size information.
-  DataLayout DL("e-p:32:32:32-p1:16:16:16-p2:64:64:64-i1:8:8-i8:8:8-i16:16:16"
-                "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64"
-                "-v128:128:128-a:0:64-s:64:64-f80:128:128-n8:16:32:64-S128");
+  DataLayout DL2("e-p:32:32:32-p1:16:16:16-p2:64:64:64-i1:8:8-i8:8:8-i16:16:16"
+                 "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64"
+                 "-v128:128:128-a:0:64-s:64:64-f80:128:128-n8:16:32:64-S128");

   Type *Int64PtrTyAS1 = PointerType::get(C, 1);
   Type *Int64PtrTyAS2 = PointerType::get(C, 2);

-  IntegerType *Int16SizePtr = DL.getIntPtrType(C, 1);
-  IntegerType *Int64SizePtr = DL.getIntPtrType(C, 2);
-
   // Cannot simplify inttoptr, addrspacecast
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
-                                           CastInst::AddrSpaceCast,
-                                           Int16Ty, Int64PtrTyAS1, Int64PtrTyAS2,
-                                           nullptr, Int16SizePtr, Int64SizePtr),
+                                           CastInst::AddrSpaceCast, Int16Ty,
+                                           Int64PtrTyAS1, Int64PtrTyAS2, &DL2),
            0U);

   // Cannot simplify addrspacecast, ptrtoint
   EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::AddrSpaceCast,
-                                           CastInst::PtrToInt,
-                                           Int64PtrTyAS1, Int64PtrTyAS2, Int16Ty,
-                                           Int64SizePtr, Int16SizePtr, nullptr),
+                                           CastInst::PtrToInt, Int64PtrTyAS1,
+                                           Int64PtrTyAS2, Int16Ty, &DL2),
            0U);

   // Pass since the bitcast address spaces are the same
-  EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr,
-                                           CastInst::BitCast,
-                                           Int16Ty, Int64PtrTyAS1, Int64PtrTyAS1,
-                                           nullptr, nullptr, nullptr),
+  EXPECT_EQ(CastInst::isEliminableCastPair(
+                CastInst::IntToPtr, CastInst::BitCast, Int16Ty, Int64PtrTyAS1,
+                Int64PtrTyAS1, nullptr),
            CastInst::IntToPtr);
-
 }

 TEST(InstructionsTest, CloneCall) {
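
Editor's note (not part of the patch): the core interface change above replaces the three precomputed IntPtrTy arguments of CastInst::isEliminableCastPair with an optional const DataLayout pointer, and folding only happens when the layout proves the intermediate type is wide enough. The following is a minimal standalone sketch of how a caller would query the new signature; the main() driver, the "p:64:64" layout string, and the printed strings are illustrative assumptions, not code from the patch.

// Hedged usage sketch of the updated CastInst::isEliminableCastPair().
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext C;
  DataLayout DL("p:64:64"); // assumed layout: 64-bit pointers in address space 0
  Type *I64 = Type::getInt64Ty(C);
  Type *Ptr = PointerType::get(C, /*AddressSpace=*/0);

  // ptrtoint (ptr -> i64) followed by inttoptr (i64 -> ptr): the DataLayout
  // shows the i64 middle type covers the whole pointer, so the pair is
  // reported as eliminable (opcode BitCast, i.e. a no-op round trip).
  unsigned WithDL = CastInst::isEliminableCastPair(
      CastInst::PtrToInt, CastInst::IntToPtr, Ptr, I64, Ptr, &DL);
  outs() << (WithDL == CastInst::BitCast ? "foldable\n" : "not foldable\n");

  // Without a DataLayout the query is now conservative and refuses to fold;
  // previously a 64-bit middle type was simply assumed to be wide enough.
  unsigned WithoutDL = CastInst::isEliminableCastPair(
      CastInst::PtrToInt, CastInst::IntToPtr, Ptr, I64, Ptr, /*DL=*/nullptr);
  outs() << (WithoutDL ? "foldable\n" : "not foldable\n");
  return 0;
}

That conservatism is also what drives the test churn above: DataLayout-unaware constant folding no longer eliminates expressions such as inttoptr (i64 ptrtoint (ptr @A to i64) to ptr), while DataLayout-aware folding of cast pairs moves into ConstantFoldCastOperand.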