diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index 403423cd69f42..e6ea99f8fb2bf 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -2,6 +2,120 @@
 ; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=X86
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=X64
 
+define i32 @freeze_and(i32 %a0) nounwind {
+; X86-LABEL: freeze_and:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $15, %eax
+; X86-NEXT:    andl $7, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_and:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $15, %eax
+; X64-NEXT:    andl $7, %eax
+; X64-NEXT:    retq
+  %x = and i32 %a0, 15
+  %y = freeze i32 %x
+  %z = and i32 %y, 7
+  ret i32 %z
+}
+
+define <2 x i64> @freeze_and_vec(<2 x i64> %a0) nounwind {
+; X86-LABEL: freeze_and_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_and_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = and <2 x i64> %a0,
+  %y = freeze <2 x i64> %x
+  %z = and <2 x i64> %y,
+  ret <2 x i64> %z
+}
+
+define i32 @freeze_or(i32 %a0) nounwind {
+; X86-LABEL: freeze_or:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl $3, %eax
+; X86-NEXT:    orl $12, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_or:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl $3, %eax
+; X64-NEXT:    orl $12, %eax
+; X64-NEXT:    retq
+  %x = or i32 %a0, 3
+  %y = freeze i32 %x
+  %z = or i32 %y, 12
+  ret i32 %z
+}
+
+define <2 x i64> @freeze_or_vec(<2 x i64> %a0) nounwind {
+; X86-LABEL: freeze_or_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    orps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    orps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_or_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = or <2 x i64> %a0,
+  %y = freeze <2 x i64> %x
+  %z = or <2 x i64> %y,
+  ret <2 x i64> %z
+}
+
+define i32 @freeze_xor(i32 %a0) nounwind {
+; X86-LABEL: freeze_xor:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl $3, %eax
+; X86-NEXT:    xorl $12, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $3, %eax
+; X64-NEXT:    xorl $12, %eax
+; X64-NEXT:    retq
+  %x = xor i32 %a0, 3
+  %y = freeze i32 %x
+  %z = xor i32 %y, 12
+  ret i32 %z
+}
+
+define <8 x i16> @freeze_xor_vec(<8 x i16> %a0) nounwind {
+; X86-LABEL: freeze_xor_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_xor_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = xor <8 x i16> %a0,
+  %y = freeze <8 x i16> %x
+  %z = xor <8 x i16> %y,
+  ret <8 x i16> %z
+}
+
 define i32 @freeze_add(i32 %a0) nounwind {
 ; X86-LABEL: freeze_add:
 ; X86:       # %bb.0: