Revert "[DAG] Fold (umin (sub a b) a) -> (usubo a b); (select usubo.1 a usubo.0)" #167854
Conversation
Revert "[DAG] Fold (umin (sub a b) a) -> (usubo a b); (select usubo.1 a usubo.0)" This reverts commit 1f58cbe.
@llvm/pr-subscribers-llvm-selectiondag @llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes

Reverts llvm/llvm-project#161651 due to downstream bad codegen reports.

Full diff: https://github.com/llvm/llvm-project/pull/167854.diff

3 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d9d3a3ec01757..df353c4d91b1a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6219,25 +6219,6 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
SDLoc(N), VT, N0, N1))
return SD;
- if (TLI.isOperationLegalOrCustom(ISD::USUBO, VT) &&
- !TLI.isOperationLegalOrCustom(ISD::UMIN, VT)) {
- SDValue B;
-
- // (umin (sub a, b), a) -> (usubo a, b); (select usubo.1, a, usubo.0)
- if (sd_match(N0, m_Sub(m_Specific(N1), m_Value(B)))) {
- SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT));
- SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N1, B);
- return DAG.getSelect(DL, VT, USO.getValue(1), N1, USO.getValue(0));
- }
-
- // (umin a, (sub a, b)) -> (usubo a, b); (select usubo.1, a, usubo.0)
- if (sd_match(N1, m_Sub(m_Specific(N0), m_Value(B)))) {
- SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT));
- SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N0, B);
- return DAG.getSelect(DL, VT, USO.getValue(1), N0, USO.getValue(0));
- }
- }
-
// Simplify the operands using demanded-bits information.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
diff --git a/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll
deleted file mode 100644
index fe3eee06db65e..0000000000000
--- a/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll
+++ /dev/null
@@ -1,151 +0,0 @@
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
-
-; GitHub issue #161036
-
-; Positive test : umin(sub(a,b),a) with scalar types should be folded
-define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) {
-; CHECK-LABEL: underflow_compare_fold_i64
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs x8, x0, x1
-; CHECK-NEXT: csel x0, x0, x8, lo
-; CHECK-NEXT: ret
- %sub = sub i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a)
- ret i64 %cond
-}
-
-; Positive test : umin(a,sub(a,b)) with scalar types should be folded
-define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) {
-; CHECK-LABEL: underflow_compare_fold_i64_commute
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs x8, x0, x1
-; CHECK-NEXT: csel x0, x0, x8, lo
-; CHECK-NEXT: ret
- %sub = sub i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub)
- ret i64 %cond
-}
-
-; Positive test : multi-use is OK since the sub instruction still runs once
-define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i64_multi_use
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs x8, x0, x1
-; CHECK-NEXT: csel x0, x0, x8, lo
-; CHECK-NEXT: str x8, [x2]
-; CHECK-NEXT: ret
- %sub = sub i64 %a, %b
- store i64 %sub, ptr addrspace(1) %ptr
- %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
- ret i64 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) {
-; CHECK-LABEL: underflow_compare_fold_i32
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs w8, w0, w1
-; CHECK-NEXT: csel w0, w0, w8, lo
-; CHECK-NEXT: ret
- %sub = sub i32 %a, %b
- %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a)
- ret i32 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) {
-; CHECK-LABEL: underflow_compare_fold_i32_commute
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs w8, w0, w1
-; CHECK-NEXT: csel w0, w0, w8, lo
-; CHECK-NEXT: ret
- %sub = sub i32 %a, %b
- %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub)
- ret i32 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i32_multi_use
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: subs w8, w0, w1
-; CHECK-NEXT: csel w0, w0, w8, lo
-; CHECK-NEXT: str w8, [x2]
-; CHECK-NEXT: ret
- %sub = sub i32 %a, %b
- store i32 %sub, ptr addrspace(1) %ptr
- %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a)
- ret i32 %cond
-}
-
-; Negative test : i16
-define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) {
-; CHECK-LABEL: underflow_compare_fold_i16
-; CHECK-LABEL: %bb.0:
-; CHECK-LABEL: sub w8, w0, w1
-; CHECK-LABEL: and w9, w0, #0xffff
-; CHECK-LABEL: and w8, w8, #0xffff
-; CHECK-LABEL: cmp w8, w9
-; CHECK-LABEL: csel w0, w8, w9, lo
-; CHECK-LABEL: ret
- %sub = sub i16 %a, %b
- %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a)
- ret i16 %cond
-}
-
-; Negative test : i16
-define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) {
-; CHECK-LABEL: underflow_compare_fold_i16_commute
-; CHECK-LABEL: %bb.0:
-; CHECK-LABEL: sub w8, w0, w1
-; CHECK-LABEL: and w9, w0, #0xffff
-; CHECK-LABEL: and w8, w8, #0xffff
-; CHECK-LABEL: cmp w9, w8
-; CHECK-LABEL: csel w0, w9, w8, lo
-; CHECK-LABEL: ret
- %sub = sub i16 %a, %b
- %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub)
- ret i16 %cond
-}
-
-; Negative test : i16
-define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i16_multi_use
-; CHECK-LABEL: %bb.0:
-; CHECK-LABEL: sub w8, w0, w1
-; CHECK-LABEL: and w9, w0, #0xffff
-; CHECK-LABEL: and w10, w8, #0xffff
-; CHECK-LABEL: strh w8, [x2]
-; CHECK-LABEL: cmp w10, w9
-; CHECK-LABEL: csel w0, w10, w9, lo
-; CHECK-LABEL: ret
- %sub = sub i16 %a, %b
- store i16 %sub, ptr addrspace(1) %ptr
- %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a)
- ret i16 %cond
-}
-
-; Negative test, vector types : umin(sub(a,b),a) but with vectors
-define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) {
-; CHECK-LABEL: underflow_compare_dontfold_vectors
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: sub v1.16b, v0.16b, v1.16b
-; CHECK-NEXT: umin v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: ret
- %sub = sub <16 x i8> %a, %b
- %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a)
- ret <16 x i8> %cond
-}
-
-; Negative test, pattern mismatch : umin(add(a,b),a)
-define i64 @umin_add(i64 %a, i64 %b) {
-; CHECK-LABEL: umin_add
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: add x8, x0, x1
-; CHECK-NEXT: cmp x8, x0
-; CHECK-NEXT: csel x0, x8, x0, lo
-; CHECK-NEXT: ret
- %add = add i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a)
- ret i64 %cond
-}
diff --git a/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll
deleted file mode 100644
index e9756b411eb2c..0000000000000
--- a/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll
+++ /dev/null
@@ -1,156 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64 | FileCheck %s
-
-; GitHub issue #161036
-
-; Positive test : umin(sub(a,b),a) with scalar types should be folded
-define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) {
-; CHECK-LABEL: underflow_compare_fold_i64
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: subq %rsi, %rax
-; CHECK-NEXT: cmovbq %rdi, %rax
-; CHECK-NEXT: retq
- %sub = sub i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a)
- ret i64 %cond
-}
-
-; Positive test : umin(a,sub(a,b)) with scalar types should be folded
-define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) {
-; CHECK-LABEL: underflow_compare_fold_i64_commute
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: subq %rsi, %rax
-; CHECK-NEXT: cmovbq %rdi, %rax
-; CHECK-NEXT: retq
- %sub = sub i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub)
- ret i64 %cond
-}
-
-; Positive test : multi-use is OK since the sub instruction still runs once
-define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i64_multi_use
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: subq %rsi, %rax
-; CHECK-NEXT: movq %rax, (%rdx)
-; CHECK-NEXT: cmovbq %rdi, %rax
-; CHECK-NEXT: retq
- %sub = sub i64 %a, %b
- store i64 %sub, ptr addrspace(1) %ptr
- %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
- ret i64 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) {
-; CHECK-LABEL: underflow_compare_fold_i32
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subl %esi, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: retq
- %sub = sub i32 %a, %b
- %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a)
- ret i32 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) {
-; CHECK-LABEL: underflow_compare_fold_i32_commute
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subl %esi, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: retq
- %sub = sub i32 %a, %b
- %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub)
- ret i32 %cond
-}
-
-; Positive test : i32
-define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i32_multi_use
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subl %esi, %eax
-; CHECK-NEXT: movl %eax, (%rdx)
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: retq
- %sub = sub i32 %a, %b
- store i32 %sub, ptr addrspace(1) %ptr
- %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a)
- ret i32 %cond
-}
-
-; Positive test : i16
-define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) {
-; CHECK-LABEL: underflow_compare_fold_i16
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subw %si, %ax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT: retq
- %sub = sub i16 %a, %b
- %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a)
- ret i16 %cond
-}
-
-; Positive test : i16
-define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) {
-; CHECK-LABEL: underflow_compare_fold_i16_commute
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subw %si, %ax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT: retq
- %sub = sub i16 %a, %b
- %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub)
- ret i16 %cond
-}
-
-; Positive test : i16
-define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: underflow_compare_fold_i16_multi_use
-; CHECK-LABEL: %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subw %si, %ax
-; CHECK-NEXT: movw %ax, (%rdx)
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT: retq
- %sub = sub i16 %a, %b
- store i16 %sub, ptr addrspace(1) %ptr
- %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a)
- ret i16 %cond
-}
-
-
-; Negative test, vector types : umin(sub(a,b),a) but with vectors
-define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) {
-; CHECK-LABEL: underflow_compare_dontfold_vectors
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: movdqa %xmm0, %xmm2
-; CHECK-NEXT: psubb %xmm1, %xmm2
-; CHECK-NEXT: pminub %xmm2, %xmm0
-; CHECK-NEXT: retq
- %sub = sub <16 x i8> %a, %b
- %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a)
- ret <16 x i8> %cond
-}
-
-; Negative test, pattern mismatch : umin(add(a,b),a)
-define i64 @umin_add(i64 %a, i64 %b) {
-; CHECK-LABEL: umin_add
-; CHECK-LABEL: %bb.0
-; CHECK-NEXT: leaq (%rsi,%rdi), %rax
-; CHECK-NEXT: cmpq %rdi, %rax
-; CHECK-NEXT: cmovaeq %rdi, %rax
-; CHECK-NEXT: retq
- %add = add i64 %a, %b
- %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a)
- ret i64 %cond
-}
@llvm/pr-subscribers-backend-aarch64

Author: Simon Pilgrim (RKSimon)

Changes

Reverts llvm/llvm-project#161651 due to downstream bad codegen reports.

Full diff: https://github.com/llvm/llvm-project/pull/167854.diff
Thanks for the revert; I was just about to create a PR. Do we usually reopen the issue as well?
Sorry, my bad. I see it's already reopened.
mstorsjo left a comment
LGTM, thanks!
Reverts #161651 due to downstream bad codegen reports.
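
For reference, the reverted DAGCombiner fold rewrote umin(a - b, a) into an overflowing subtract plus a select: when a - b wraps (that is, when a < b), the wrapped result exceeds a, so the minimum is a itself; otherwise it is a - b, exactly the select(usubo.1, a, usubo.0) form built in the removed code. The short C++ program below is only an illustration of that scalar equivalence; the names and the exhaustive-check approach are mine, not part of the LLVM patch.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Exhaustive 8-bit check that umin(a - b, a) equals the
// usubo+select form select(a < b, a, a - b), the rewrite the
// reverted combine performed for scalar integer types.
int main() {
  for (unsigned a = 0; a < 256; ++a) {
    for (unsigned b = 0; b < 256; ++b) {
      std::uint8_t sub = static_cast<std::uint8_t>(a - b); // wrapping subtract
      std::uint8_t umin_form =
          std::min<std::uint8_t>(sub, static_cast<std::uint8_t>(a));
      bool overflow = a < b; // usubo carry-out bit
      std::uint8_t select_form = overflow ? static_cast<std::uint8_t>(a) : sub;
      assert(umin_form == select_form);
    }
  }
  return 0;
}

On targets where USUBO is legal but UMIN is not, this select form lets the subtraction's flags feed a conditional move directly, as in the subs/csel and sub/cmov sequences checked by the deleted AArch64 and X86 tests above.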