Skip to content

Conversation

@paulwalker-arm
Copy link
Collaborator

@llvmbot llvmbot added llvm:instcombine Covers the InstCombine, InstSimplify and AggressiveInstCombine passes llvm:transforms labels Nov 12, 2025
@llvmbot
Copy link
Member

llvmbot commented Nov 12, 2025

@llvm/pr-subscribers-llvm-transforms

Author: Paul Walker (paulwalker-arm)

Changes

https://alive2.llvm.org/ce/z/n2oBKP


Full diff: https://github.com/llvm/llvm-project/pull/167693.diff

2 Files Affected:

  • (modified) llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (+9-1)
  • (modified) llvm/test/Transforms/InstCombine/not.ll (+54)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index cbaff294819a2..ba5568b00441b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5096,9 +5096,17 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
     return &I;
   }
 
+  // not (bitcast (cmp A, B)) --> bitcast (!cmp A, B)
+  if (match(NotOp, m_OneUse(m_BitCast(m_Value(X)))) &&
+      match(X, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
+    cast<CmpInst>(X)->setPredicate(CmpInst::getInversePredicate(Pred));
+    return new BitCastInst(X, Ty);
+  }
+
   // Move a 'not' ahead of casts of a bool to enable logic reduction:
   // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
-  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) && X->getType()->isIntOrIntVectorTy(1)) {
+  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
+      X->getType()->isIntOrIntVectorTy(1)) {
     Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
     Value *NotX = Builder.CreateNot(X);
     Value *Sext = Builder.CreateSExt(NotX, SextTy);
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index d693b9d8f8557..60d7131a57021 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -1061,3 +1061,57 @@ if.else:
   call void @f2()
   unreachable
 }
+
+define i8 @invert_bitcasted_icmp(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT:    ret i8 [[MASK_AS_INT]]
+;
+  %cmp = icmp sle <8 x i32> %a, %b
+  %mask.as.int = bitcast <8 x i1> %cmp to i8
+  %not = xor i8 %mask.as.int, 255
+  ret i8 %not
+}
+
+define i8 @invert_bitcasted_icmp_multi_use_1(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp_multi_use_1(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    call void (...) @llvm.fake.use(<8 x i1> [[CMP]])
+; CHECK-NEXT:    [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[MASK_AS_INT]], -1
+; CHECK-NEXT:    ret i8 [[NOT]]
+;
+  %cmp = icmp sle <8 x i32> %a, %b
+  call void (...) @llvm.fake.use(<8 x i1> %cmp)
+  %mask.as.int = bitcast <8 x i1> %cmp to i8
+  %not = xor i8 %mask.as.int, -1
+  ret i8 %not
+}
+
+define i8 @invert_bitcasted_icmp_multi_use_2(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp_multi_use_2(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT:    call void (...) @llvm.fake.use(i8 [[MASK_AS_INT]])
+; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[MASK_AS_INT]], -1
+; CHECK-NEXT:    ret i8 [[NOT]]
+;
+  %cmp = icmp sle <8 x i32> %a, %b
+  %mask.as.int = bitcast <8 x i1> %cmp to i8
+  call void (...) @llvm.fake.use(i8 %mask.as.int)
+  %not = xor i8 %mask.as.int, -1
+  ret i8 %not
+}
+
+define i8 @invert_bitcasted_fcmp(<8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: @invert_bitcasted_fcmp(
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp uge <8 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT:    ret i8 [[MASK_AS_INT]]
+;
+  %cmp = fcmp olt <8 x float> %a, %b
+  %mask.as.int = bitcast <8 x i1> %cmp to i8
+  %not = xor i8 %mask.as.int, 255
+  ret i8 %not
+}

Copy link
Member

@dtcxzyw dtcxzyw left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LG. If we encounter more patterns like not (bitcast oneuse X) -> bitcast (not X) in the future, we can switch to isFreeToInvert/getFreelyInverted.

@paulwalker-arm paulwalker-arm force-pushed the combine-invert-bitcasted-cmp branch from 1fa4f6f to 468cdd5 (compare) — November 12, 2025 17:12
@paulwalker-arm paulwalker-arm merged commit f84ad45 into llvm:main Nov 13, 2025
10 checks passed
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

llvm:instcombine Covers the InstCombine, InstSimplify and AggressiveInstCombine passes llvm:transforms

Projects

None yet

Development

Successfully merging this pull request may close these issues.

3 participants