Commit ac1b999 (permalink view)
[DAGCombiner] fold or (and x, y), x --> x
Browse the repository at this point in the history.
Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D138398
Committed by bcl5980 on Nov 21, 2022.
1 parent 281a5c7 commit ac1b999
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 69 deletions.
4 changes: 4 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Expand Up @@ -6988,6 +6988,10 @@ static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1,
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);

// fold or (and x, y), x --> x
if (N00 == N1 || N01 == N1)
return N1;

// fold (or (and X, (xor Y, -1)), Y) -> (or X, Y)
// TODO: Set AllowUndefs = true.
if (getBitwiseNotOperand(N01, N00,
Expand Down
32 changes: 8 additions & 24 deletions llvm/test/CodeGen/X86/avx512-select.ll
Expand Up @@ -742,51 +742,35 @@ define void @select_v1i1(ptr %w, ptr %x, ptr %y, i1 %z) nounwind {
define i8 @julia_issue36955(<8 x i1> %mask, <8 x double> %a) {
; X86-AVX512F-LABEL: julia_issue36955:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; X86-AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; X86-AVX512F-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X86-AVX512F-NEXT: vcmplepd %zmm2, %zmm1, %k1
; X86-AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; X86-AVX512F-NEXT: korw %k0, %k1, %k0
; X86-AVX512F-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X86-AVX512F-NEXT: vcmplepd %zmm0, %zmm1, %k0
; X86-AVX512F-NEXT: kmovw %k0, %eax
; X86-AVX512F-NEXT: # kill: def $al killed $al killed $eax
; X86-AVX512F-NEXT: vzeroupper
; X86-AVX512F-NEXT: retl
;
; X64-AVX512F-LABEL: julia_issue36955:
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; X64-AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; X64-AVX512F-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-AVX512F-NEXT: vcmplepd %zmm2, %zmm1, %k1
; X64-AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; X64-AVX512F-NEXT: korw %k0, %k1, %k0
; X64-AVX512F-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X64-AVX512F-NEXT: vcmplepd %zmm0, %zmm1, %k0
; X64-AVX512F-NEXT: kmovw %k0, %eax
; X64-AVX512F-NEXT: # kill: def $al killed $al killed $eax
; X64-AVX512F-NEXT: vzeroupper
; X64-AVX512F-NEXT: retq
;
; X86-AVX512BW-LABEL: julia_issue36955:
; X86-AVX512BW: # %bb.0:
; X86-AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; X86-AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-AVX512BW-NEXT: vxorpd %xmm3, %xmm3, %xmm3
; X86-AVX512BW-NEXT: vcmplepd %zmm3, %zmm1, %k1
; X86-AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm2, %k0 {%k1}
; X86-AVX512BW-NEXT: korw %k0, %k1, %k0
; X86-AVX512BW-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X86-AVX512BW-NEXT: vcmplepd %zmm0, %zmm1, %k0
; X86-AVX512BW-NEXT: kmovd %k0, %eax
; X86-AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; X86-AVX512BW-NEXT: vzeroupper
; X86-AVX512BW-NEXT: retl
;
; X64-AVX512BW-LABEL: julia_issue36955:
; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; X64-AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-AVX512BW-NEXT: vxorpd %xmm3, %xmm3, %xmm3
; X64-AVX512BW-NEXT: vcmplepd %zmm3, %zmm1, %k1
; X64-AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm2, %k0 {%k1}
; X64-AVX512BW-NEXT: korw %k0, %k1, %k0
; X64-AVX512BW-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X64-AVX512BW-NEXT: vcmplepd %zmm0, %zmm1, %k0
; X64-AVX512BW-NEXT: kmovd %k0, %eax
; X64-AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; X64-AVX512BW-NEXT: vzeroupper
Expand Down
22 changes: 10 additions & 12 deletions llvm/test/CodeGen/X86/bswap_tree2.ll
Expand Up @@ -10,23 +10,21 @@
; CHECK-LABEL: test1:
; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzwl %ax, %ecx
; CHECK-NEXT: orl %eax, %ecx
; CHECK-NEXT: orl $-16777216, %ecx # imm = 0xFF000000
; CHECK-NEXT: shrl $8, %ecx
; CHECK-NEXT: andl $16711935, %eax # imm = 0xFF00FF
; CHECK-NEXT: shll $8, %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: andl $16711935, %ecx # imm = 0xFF00FF
; CHECK-NEXT: shll $8, %ecx
; CHECK-NEXT: orl $-16777216, %eax # imm = 0xFF000000
; CHECK-NEXT: shrl $8, %eax
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test1:
; CHECK64: # %bb.0:
; CHECK64-NEXT: movzwl %di, %eax
; CHECK64-NEXT: orl %edi, %eax
; CHECK64-NEXT: orl $-16777216, %eax # imm = 0xFF000000
; CHECK64-NEXT: shrl $8, %eax
; CHECK64-NEXT: andl $16711935, %edi # imm = 0xFF00FF
; CHECK64-NEXT: shll $8, %edi
; CHECK64-NEXT: movl %edi, %eax
; CHECK64-NEXT: andl $16711935, %eax # imm = 0xFF00FF
; CHECK64-NEXT: shll $8, %eax
; CHECK64-NEXT: orl $-16777216, %edi # imm = 0xFF000000
; CHECK64-NEXT: shrl $8, %edi
; CHECK64-NEXT: orl %edi, %eax
; CHECK64-NEXT: retq
%byte0 = and i32 %x, 255 ; 0x000000ff
Expand Down
42 changes: 9 additions & 33 deletions llvm/test/CodeGen/X86/known-signbits-vector.ll
Expand Up @@ -371,46 +371,22 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
; X86-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: vpsrlq $60, %xmm0, %xmm2
; X86-NEXT: vpsrlq $61, %xmm0, %xmm0
; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X86-NEXT: vmovdqa {{.*#+}} xmm2 = [4,0,8,0]
; X86-NEXT: vpxor %xmm2, %xmm0, %xmm0
; X86-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; X86-NEXT: vpand %xmm1, %xmm0, %xmm2
; X86-NEXT: vpor %xmm1, %xmm2, %xmm1
; X86-NEXT: vpsrad $29, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-NEXT: vpxor %xmm0, %xmm1, %xmm0
; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpsrlq $60, %xmm0, %xmm2
; X64-AVX1-NEXT: vpsrlq $61, %xmm0, %xmm0
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4,8]
; X64-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; X64-AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
; X64-AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [4,8]
; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
; X64-AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX2-NEXT: retq
; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X64: # %bb.0:
; X64-NEXT: vpsrad $29, %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: vpxor %xmm0, %xmm1, %xmm0
; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
%1 = ashr <2 x i64> %a0, <i64 61, i64 60>
%2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%3 = sext <2 x i32> %2 to <2 x i64>
Expand Down

0 comments on commit ac1b999.