diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 0e5bc481383a0..bc6b704bbc943 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -966,6 +966,7 @@ LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
   // recurse on our operand. This can cut a long search short if we know we're
   // not going to be able to get any useful information anways.
   switch (CI->getOpcode()) {
+  case Instruction::PtrToInt:
   case Instruction::Trunc:
   case Instruction::SExt:
   case Instruction::ZExt:
@@ -977,6 +978,11 @@ LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
     return ValueLatticeElement::getOverdefined();
   }
 
+  // Assumed predicates over the integral value of the pointer are constraints
+  // on the cast to integer value itself.
+  if (auto *PTI = dyn_cast<PtrToIntInst>(CI))
+    return getBlockValue(PTI->getPointerOperand(), BB, PTI);
+
   // Figure out the range of the LHS. If that fails, we still apply the
   // transfer rule on the full set since we may be able to locally infer
   // interesting facts.
@@ -1350,6 +1356,20 @@ std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
   }
 
   Type *Ty = Val->getType();
+
+  // On the off-chance we may compute a range over the address of a pointer.
+  ConstantInt *CI = nullptr;
+  if (Ty->isPointerTy() && LHS == Val &&
+      match(RHS, m_IntToPtr(m_ConstantInt(CI)))) {
+    if (Ty->getPointerAddressSpace() ==
+        RHS->getType()->getPointerAddressSpace()) {
+      ConstantRange RHSRange(CI->getValue());
+      ConstantRange AllowedR =
+          ConstantRange::makeAllowedICmpRegion(EdgePred, RHSRange);
+      return ValueLatticeElement::getRange(AllowedR);
+    }
+  }
+
   if (!Ty->isIntegerTy())
     return ValueLatticeElement::getOverdefined();
 
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
index c9ee233b5a461..68fc73743f607 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
@@ -355,3 +355,58 @@ define i8 @test_umax_nneg(i8 %a, i8 %b) {
   %ret = call i8 @llvm.umax.i8(i8 %nneg_a, i8 %nneg_b)
   ret i8 %ret
 }
+
+define i8 @test_umin_ptr_address(ptr %p) {
+; CHECK-LABEL: @test_umin_ptr_address(
+; CHECK-NEXT:    [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT:    [[PI_HI:%.*]] = trunc nuw nsw i64 [[PI_SHR]] to i8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr [[P]], inttoptr (i64 176093659136 to ptr)
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    ret i8 [[PI_HI]]
+;
+  %pi = ptrtoint ptr %p to i64
+  %pi.shr = lshr i64 %pi, 32
+  %pi.hi = trunc nuw i64 %pi.shr to i8
+  %umin = call i8 @llvm.umin.i8(i8 %pi.hi, i8 41)
+  %cmp = icmp ult ptr %p, inttoptr (i64 176093659136 to ptr)
+  call void @llvm.assume(i1 %cmp)
+  ret i8 %umin
+}
+
+define i8 @test_umax_ptr_address(ptr %p) {
+; CHECK-LABEL: @test_umax_ptr_address(
+; CHECK-NEXT:    [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT:    [[PI_HI:%.*]] = trunc nuw i64 [[PI_SHR]] to i8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt ptr [[P]], inttoptr (i64 180388626431 to ptr)
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    ret i8 [[PI_HI]]
+;
+  %pi = ptrtoint ptr %p to i64
+  %pi.shr = lshr i64 %pi, 32
+  %pi.hi = trunc nuw i64 %pi.shr to i8
+  %umin = call i8 @llvm.umax.i8(i8 %pi.hi, i8 41)
+  %cmp = icmp ugt ptr %p, inttoptr (i64 180388626431 to ptr)
+  call void @llvm.assume(i1 %cmp)
+  ret i8 %umin
+}
+
+define i8 @test_umin_ptr_address_negative(ptr %p) {
+; CHECK-LABEL: @test_umin_ptr_address_negative(
+; CHECK-NEXT:    [[PI:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[PI_SHR:%.*]] = lshr i64 [[PI]], 32
+; CHECK-NEXT:    [[PI_HI:%.*]] = trunc nuw i64 [[PI_SHR]] to i8
+; CHECK-NEXT:    [[UMIN:%.*]] = call i8 @llvm.umin.i8(i8 [[PI_HI]], i8 41)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[P]], inttoptr (i64 176093659136 to ptr)
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    ret i8 [[UMIN]]
+;
+  %pi = ptrtoint ptr %p to i64
+  %pi.shr = lshr i64 %pi, 32
+  %pi.hi = trunc nuw i64 %pi.shr to i8
+  %umin = call i8 @llvm.umin.i8(i8 %pi.hi, i8 41)
+  %cmp = icmp ne ptr %p, inttoptr (i64 176093659136 to ptr)
+  call void @llvm.assume(i1 %cmp)
+  ret i8 %umin
+}