1,153 changes: 1,153 additions & 0 deletions llvm/test/CodeGen/X86/icmp-abs-C-vec.ll

Large diffs are not rendered by default.

209 changes: 209 additions & 0 deletions llvm/test/CodeGen/X86/icmp-abs-C.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64

declare i64 @llvm.abs.i64(i64, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i8 @llvm.abs.i8(i8, i1)

; i64: `x == 64 || x == -64` selected against an already-computed abs(x).
; The X64 CHECK lines show the pair of equality compares folding into a single
; `cmpq $64` of the abs value (CHECK lines are autogenerated — do not hand-edit).
define i64 @eq_or_with_dom_abs(i64 %x) nounwind {
; X86-LABEL: eq_or_with_dom_abs:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: sarl $31, %eax
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: xorl %eax, %esi
; X86-NEXT: subl %eax, %esi
; X86-NEXT: sbbl %eax, %edx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: xorl $12312, %eax # imm = 0x3018
; X86-NEXT: xorl $64, %esi
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: orl %edx, %esi
; X86-NEXT: sete %bl
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: movl $2344, %edi # imm = 0x928
; X86-NEXT: cmpl %eax, %edi
; X86-NEXT: sbbl %edx, %esi
; X86-NEXT: jb .LBB0_2
; X86-NEXT: # %bb.1:
; X86-NEXT: movb %bl, %cl
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB0_2:
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
;
; X64-LABEL: eq_or_with_dom_abs:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: negq %rcx
; X64-NEXT: cmovsq %rdi, %rcx
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: xorq $12312, %rdx # imm = 0x3018
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq $64, %rcx
; X64-NEXT: sete %al
; X64-NEXT: cmpq $2345, %rdx # imm = 0x929
; X64-NEXT: cmovaeq %rdx, %rax
; X64-NEXT: retq
%absx = call i64 @llvm.abs.i64(i64 %x, i1 true)
%foo = xor i64 %absx, 12312
%bar = icmp ugt i64 %foo, 2344
%cmp0 = icmp eq i64 %x, 64
%cmp1 = icmp eq i64 %x, -64
%cmp = or i1 %cmp0, %cmp1
%cmp64 = zext i1 %cmp to i64
%r = select i1 %bar, i64 %foo, i64 %cmp64
ret i64 %r
}

; i32 variant with a non-power-of-2 constant: `x == 123 || x == -123` with a
; dominating abs(x). Both targets fold the pair into one `cmp $123` of the abs
; value (CHECK lines are autogenerated — do not hand-edit).
define i32 @eq_or_with_dom_abs_non_po2(i32 %x) nounwind {
; X86-LABEL: eq_or_with_dom_abs_non_po2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: sarl $31, %eax
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: subl %eax, %edx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: xorl $12312, %eax # imm = 0x3018
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpl $123, %edx
; X86-NEXT: sete %dl
; X86-NEXT: cmpl $2345, %eax # imm = 0x929
; X86-NEXT: jae .LBB1_2
; X86-NEXT: # %bb.1:
; X86-NEXT: movb %dl, %cl
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB1_2:
; X86-NEXT: retl
;
; X64-LABEL: eq_or_with_dom_abs_non_po2:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: negl %ecx
; X64-NEXT: cmovsl %edi, %ecx
; X64-NEXT: movl %ecx, %edx
; X64-NEXT: xorl $12312, %edx # imm = 0x3018
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $123, %ecx
; X64-NEXT: sete %al
; X64-NEXT: cmpl $2345, %edx # imm = 0x929
; X64-NEXT: cmovael %edx, %eax
; X64-NEXT: retq
%absx = call i32 @llvm.abs.i32(i32 %x, i1 true)
%foo = xor i32 %absx, 12312
%bar = icmp ugt i32 %foo, 2344
%cmp0 = icmp eq i32 %x, 123
%cmp1 = icmp eq i32 %x, -123
%cmp = or i1 %cmp0, %cmp1
%cmp64 = zext i1 %cmp to i32
%r = select i1 %bar, i32 %foo, i32 %cmp64
ret i32 %r
}

; i8 ne/and variant: `x != 121 && x != -121` with a dominating abs(x) and a
; non-power-of-2 constant. The compare pair folds into one `cmpb $121` of the
; abs value (CHECK lines are autogenerated — do not hand-edit).
define i8 @ne_and_with_dom_abs_non_pow2(i8 %x) nounwind {
; X86-LABEL: ne_and_with_dom_abs_non_pow2:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarb $7, %al
; X86-NEXT: xorb %al, %cl
; X86-NEXT: subb %al, %cl
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: xorb $12, %al
; X86-NEXT: cmpb $121, %cl
; X86-NEXT: setne %cl
; X86-NEXT: cmpb $24, %al
; X86-NEXT: jae .LBB2_2
; X86-NEXT: # %bb.1:
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB2_2:
; X86-NEXT: retl
;
; X64-LABEL: ne_and_with_dom_abs_non_pow2:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: sarb $7, %al
; X64-NEXT: xorb %al, %dil
; X64-NEXT: subb %al, %dil
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: xorb $12, %cl
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpb $121, %dil
; X64-NEXT: setne %al
; X64-NEXT: cmpb $24, %cl
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: cmovael %ecx, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%absx = call i8 @llvm.abs.i8(i8 %x, i1 true)
%foo = xor i8 %absx, 12
%bar = icmp ugt i8 %foo, 23
%cmp0 = icmp ne i8 %x, 121
%cmp1 = icmp ne i8 %x, -121
%cmp = and i1 %cmp0, %cmp1
%cmp64 = zext i1 %cmp to i8
%r = select i1 %bar, i8 %foo, i8 %cmp64
ret i8 %r
}

; i16 ne/and variant: `x != 64 && x != -64` with a dominating abs(x). The
; compare pair folds into one `cmpw $64` of the abs value (CHECK lines are
; autogenerated — do not hand-edit).
define i16 @ne_and_with_dom_abs(i16 %x) nounwind {
; X86-LABEL: ne_and_with_dom_abs:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movswl %cx, %eax
; X86-NEXT: sarl $15, %eax
; X86-NEXT: xorl %eax, %ecx
; X86-NEXT: subl %eax, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: xorl $12312, %eax # imm = 0x3018
; X86-NEXT: movzwl %ax, %esi
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: cmpw $64, %cx
; X86-NEXT: setne %cl
; X86-NEXT: cmpl $2345, %esi # imm = 0x929
; X86-NEXT: jae .LBB3_2
; X86-NEXT: # %bb.1:
; X86-NEXT: movb %cl, %dl
; X86-NEXT: movl %edx, %eax
; X86-NEXT: .LBB3_2:
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: ne_and_with_dom_abs:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: negw %cx
; X64-NEXT: cmovsw %di, %cx
; X64-NEXT: movl %ecx, %edx
; X64-NEXT: xorl $12312, %edx # imm = 0x3018
; X64-NEXT: movzwl %dx, %esi
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpw $64, %cx
; X64-NEXT: setne %al
; X64-NEXT: cmpl $2345, %esi # imm = 0x929
; X64-NEXT: cmovael %edx, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%absx = call i16 @llvm.abs.i16(i16 %x, i1 true)
%foo = xor i16 %absx, 12312
%bar = icmp ugt i16 %foo, 2344
%cmp0 = icmp ne i16 %x, 64
%cmp1 = icmp ne i16 %x, -64
%cmp = and i1 %cmp0, %cmp1
%cmp64 = zext i1 %cmp to i16
%r = select i1 %bar, i16 %foo, i16 %cmp64
ret i16 %r
}
270 changes: 270 additions & 0 deletions llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,270 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64

declare i64 @llvm.abs.i64(i64, i1)
declare <2 x i64> @llvm.abs.2xi64(<2 x i64>, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i8 @llvm.abs.i8(i8, i1)

; `x == 32 || x == -32` with a power-of-2 constant: folds to the branch-free
; add/test form (`add $32; test $-65`), i.e. (x + C) & ~(2C | (C-1)) == 0.
; CHECK lines are autogenerated — do not hand-edit.
define i1 @eq_pow_or(i32 %0) nounwind {
; X86-LABEL: eq_pow_or:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $32, %eax
; X86-NEXT: testl $-65, %eax
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: eq_pow_or:
; X64: # %bb.0:
; X64-NEXT: addl $32, %edi
; X64-NEXT: testl $-65, %edi
; X64-NEXT: sete %al
; X64-NEXT: retq
%2 = icmp eq i32 %0, 32
%3 = icmp eq i32 %0, -32
%4 = or i1 %2, %3
ret i1 %4
}

; `x != 16 && x != -16` with a power-of-2 constant (i8): folds to the
; branch-free add/test form (`addb $16; testb $-33`).
; CHECK lines are autogenerated — do not hand-edit.
define i1 @ne_pow_and(i8 %0) nounwind {
; X86-LABEL: ne_pow_and:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb $16, %al
; X86-NEXT: testb $-33, %al
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: ne_pow_and:
; X64: # %bb.0:
; X64-NEXT: addb $16, %dil
; X64-NEXT: testb $-33, %dil
; X64-NEXT: setne %al
; X64-NEXT: retq
%2 = icmp ne i8 %0, 16
%3 = icmp ne i8 %0, -16
%4 = and i1 %2, %3
ret i1 %4
}

define i1 @eq_pow_mismatch_or(i32 %0) nounwind {
; X86-LABEL: eq_pow_mismatch_or:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $16, %eax
; X86-NEXT: sete %cl
; X86-NEXT: cmpl $-32, %eax
; X86-NEXT: sete %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: retl
;
; X64-LABEL: eq_pow_mismatch_or:
; X64: # %bb.0:
; X64-NEXT: cmpl $16, %edi
; X64-NEXT: sete %cl
; X64-NEXT: cmpl $-32, %edi
; X64-NEXT: sete %al
; X64-NEXT: orb %cl, %al
; X64-NEXT: retq
%2 = icmp eq i32 %0, 16
%3 = icmp eq i32 %0, -32
%4 = or i1 %2, %3
ret i1 %4
}

; Negative test: ±17 is not a power of 2, so the add/test fold must NOT fire —
; both compares stay separate.
; CHECK lines are autogenerated — do not hand-edit.
define i1 @ne_non_pow_and(i8 %0) nounwind {
; X86-LABEL: ne_non_pow_and:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpb $17, %al
; X86-NEXT: setne %cl
; X86-NEXT: cmpb $-17, %al
; X86-NEXT: setne %al
; X86-NEXT: andb %cl, %al
; X86-NEXT: retl
;
; X64-LABEL: ne_non_pow_and:
; X64: # %bb.0:
; X64-NEXT: cmpb $17, %dil
; X64-NEXT: setne %cl
; X64-NEXT: cmpb $-17, %dil
; X64-NEXT: setne %al
; X64-NEXT: andb %cl, %al
; X64-NEXT: retq
%2 = icmp ne i8 %0, 17
%3 = icmp ne i8 %0, -17
%4 = and i1 %2, %3
ret i1 %4
}

; Negative test: `ne` combined with `or` (instead of `and`) is the wrong
; predicate/logic pairing for the fold, so it must NOT fire.
; CHECK lines are autogenerated — do not hand-edit.
define i1 @ne_pow_or(i32 %0) nounwind {
; X86-LABEL: ne_pow_or:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: xorl $32, %ecx
; X86-NEXT: xorl $-32, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: ne_pow_or:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl $32, %eax
; X64-NEXT: xorl $-32, %edi
; X64-NEXT: orl %eax, %edi
; X64-NEXT: setne %al
; X64-NEXT: retq
%2 = icmp ne i32 %0, 32
%3 = icmp ne i32 %0, -32
%4 = or i1 %2, %3
ret i1 %4
}

; Negative test: `eq` combined with `and` (instead of `or`) is the wrong
; predicate/logic pairing for the fold, so it must NOT fire.
; CHECK lines are autogenerated — do not hand-edit.
define i1 @eq_pow_and(i8 %0) nounwind {
; X86-LABEL: eq_pow_and:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: xorb $16, %cl
; X86-NEXT: xorb $-16, %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: eq_pow_and:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorb $16, %al
; X64-NEXT: xorb $-16, %dil
; X64-NEXT: orb %al, %dil
; X64-NEXT: sete %al
; X64-NEXT: retq
%2 = icmp eq i8 %0, 16
%3 = icmp eq i8 %0, -16
%4 = and i1 %2, %3
ret i1 %4
}

; abs(x) == 4 with a power-of-2 constant: folds to the branch-free add/test
; form (`add $4; test $-9`) instead of materializing abs.
; CHECK lines are autogenerated — do not hand-edit.
define i1 @abs_eq_pow2(i32 %0) nounwind {
; X86-LABEL: abs_eq_pow2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $4, %eax
; X86-NEXT: testl $-9, %eax
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: abs_eq_pow2:
; X64: # %bb.0:
; X64-NEXT: addl $4, %edi
; X64-NEXT: testl $-9, %edi
; X64-NEXT: sete %al
; X64-NEXT: retq
%2 = tail call i32 @llvm.abs.i32(i32 %0, i1 true)
%3 = icmp eq i32 %2, 4
ret i1 %3
}

; abs(x) != 2 on i64: the add/test fold applies even when i64 must be split
; into register pairs on 32-bit x86 (add/adc then and/or).
; CHECK lines are autogenerated — do not hand-edit.
define i1 @abs_ne_pow2(i64 %0) nounwind {
; X86-LABEL: abs_ne_pow2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addl $2, %eax
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: andl $-5, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: abs_ne_pow2:
; X64: # %bb.0:
; X64-NEXT: addq $2, %rdi
; X64-NEXT: testq $-5, %rdi
; X64-NEXT: setne %al
; X64-NEXT: retq
%2 = tail call i64 @llvm.abs.i64(i64 %0, i1 true)
%3 = icmp ne i64 %2, 2
ret i1 %3
}

; Negative test: -8192 is not a positive power of 2, so abs is materialized and
; compared directly (-8192 as unsigned i16 is 57344 = 0xE000).
; CHECK lines are autogenerated — do not hand-edit.
define i1 @abs_ne_nonpow2(i16 %0) nounwind {
; X86-LABEL: abs_ne_nonpow2:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movswl %ax, %ecx
; X86-NEXT: sarl $15, %ecx
; X86-NEXT: xorl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: movzwl %ax, %eax
; X86-NEXT: cmpl $57344, %eax # imm = 0xE000
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: abs_ne_nonpow2:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negw %ax
; X64-NEXT: cmovsw %di, %ax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: cmpl $57344, %eax # imm = 0xE000
; X64-NEXT: setne %al
; X64-NEXT: retq
%2 = tail call i16 @llvm.abs.i16(i16 %0, i1 true)
%3 = icmp ne i16 %2, -8192
ret i1 %3
}

; Vector case: abs(<2 x i64>) != <8, 8>. On 32-bit x86 this scalarizes into
; per-lane abs + compare; on x86-64 SSE2 it stays vectorized (psrad/pxor/psubq
; abs, then pcmpeqd against a constant pool and an invert).
; CHECK lines are autogenerated — do not hand-edit.
define <2 x i1> @abs_ne_vec(<2 x i64> %0) nounwind {
; X86-LABEL: abs_ne_vec:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: xorl %esi, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: xorl %esi, %edx
; X86-NEXT: subl %esi, %edx
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: xorl %esi, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: xorl %esi, %edi
; X86-NEXT: subl %esi, %edi
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: xorl $8, %edi
; X86-NEXT: orl %eax, %edi
; X86-NEXT: setne %al
; X86-NEXT: xorl $8, %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: setne %dl
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
;
; X64-LABEL: abs_ne_vec:
; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrad $31, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: psubq %xmm1, %xmm0
; X64-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: pcmpeqd %xmm1, %xmm1
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: retq
%2 = tail call <2 x i64> @llvm.abs.2xi64(<2 x i64> %0, i1 true)
%3 = icmp ne <2 x i64> %2, <i64 8, i64 8>
ret <2 x i1> %3
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; AVX512VL-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vpternlogd $200, %xmm1, %xmm2, %xmm0
; AVX512VL-NEXT: vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; AVX512VL-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
; AVX512VL-NEXT: kmovw %k1, %edx
; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
Expand Down
20 changes: 6 additions & 14 deletions llvm/test/CodeGen/X86/vector-compare-simplify.ll
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,7 @@ define <4 x i32> @sgt_min(<4 x i32> %x) {
define <4 x i32> @sle_min(<4 x i32> %x) {
; CHECK-LABEL: sle_min:
; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
%cmp = icmp sle <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
%r = sext <4 x i1> %cmp to <4 x i32>
Expand Down Expand Up @@ -80,10 +78,7 @@ define <4 x i32> @slt_max(<4 x i32> %x) {
define <4 x i32> @sge_max(<4 x i32> %x) {
; CHECK-LABEL: sge_max:
; CHECK: # %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
%cmp = icmp sge <4 x i32> %x, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%r = sext <4 x i1> %cmp to <4 x i32>
Expand Down Expand Up @@ -197,10 +192,7 @@ define <4 x i32> @slt_min_plus1(<4 x i32> %x) {
define <4 x i32> @sge_min_plus1(<4 x i32> %x) {
; CHECK-LABEL: sge_min_plus1:
; CHECK: # %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483649,2147483649,2147483649,2147483649]
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm0
; CHECK-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
%cmp = icmp sge <4 x i32> %x, <i32 -2147483647, i32 -2147483647, i32 -2147483647, i32 -2147483647>
%r = sext <4 x i1> %cmp to <4 x i32>
Expand All @@ -220,9 +212,9 @@ define <4 x i32> @sgt_max_minus1(<4 x i32> %x) {
define <4 x i32> @sle_max_minus1(<4 x i32> %x) {
; CHECK-LABEL: sle_max_minus1:
; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm1, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sle <4 x i32> %x, <i32 2147483646, i32 2147483646, i32 2147483646, i32 2147483646>
%r = sext <4 x i1> %cmp to <4 x i32>
Expand Down
2,310 changes: 948 additions & 1,362 deletions llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll

Large diffs are not rendered by default.

2,145 changes: 863 additions & 1,282 deletions llvm/test/CodeGen/X86/vector-popcnt-256-ult-ugt.ll

Large diffs are not rendered by default.

1,832 changes: 896 additions & 936 deletions llvm/test/CodeGen/X86/vector-popcnt-512-ult-ugt.ll

Large diffs are not rendered by default.