From 4c8382eec60d75825cda1041832a3278e9443d3f Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Fri, 19 Jan 2018 17:09:28 +0000
Subject: [PATCH] [x86] add RUN line and auto-generate checks

There were checks for a 32-bit target here, but no RUN line corresponding
to that prefix. I don't know what the intent of these tests is, but at
least now we can see what happens for both targets.

llvm-svn: 322961
---
 llvm/test/CodeGen/X86/3addr-16bit.ll | 237 ++++++++++++++++++++-------
 1 file changed, 182 insertions(+), 55 deletions(-)

diff --git a/llvm/test/CodeGen/X86/3addr-16bit.ll b/llvm/test/CodeGen/X86/3addr-16bit.ll
index c80e91a4d8b01..15cd82e19bdaf 100644
--- a/llvm/test/CodeGen/X86/3addr-16bit.ll
+++ b/llvm/test/CodeGen/X86/3addr-16bit.ll
@@ -1,96 +1,223 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -asm-verbose=false | FileCheck %s -check-prefix=64BIT
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-apple-darwin | FileCheck %s -check-prefix=X32
+
 ; rdar://7329206
 
 ; In 32-bit the partial register stall would degrade performance.
 
-define zeroext i16 @t1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+define zeroext i16 @test1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+; X64-LABEL: test1:
+; X64: ## %bb.0: ## %entry
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: incl %eax
+; X64-NEXT: cmpw %di, %si
+; X64-NEXT: jne LBB0_2
+; X64-NEXT: ## %bb.1: ## %bb
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq _foo
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+; X64-NEXT: LBB0_2: ## %bb1
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test1:
+; X32: ## %bb.0: ## %entry
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: incl %eax
+; X32-NEXT: cmpw {{[0-9]+}}(%esp), %cx
+; X32-NEXT: jne LBB0_2
+; X32-NEXT: ## %bb.1: ## %bb
+; X32-NEXT: movzwl %ax, %esi
+; X32-NEXT: movl %esi, (%esp)
+; X32-NEXT: calll _foo
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: jmp LBB0_3
+; X32-NEXT: LBB0_2: ## %bb1
+; X32-NEXT: movzwl %ax, %eax
+; X32-NEXT: LBB0_3: ## %bb1
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
 entry:
-; 32BIT-LABEL: t1:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal 1(%eax), %ecx
-
-; 64BIT-LABEL: t1:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: movl %esi, %eax
-  %0 = icmp eq i16 %k, %c ; <i1> [#uses=1]
-  %1 = add i16 %k, 1 ; <i16> [#uses=3]
+  %0 = icmp eq i16 %k, %c
+  %1 = add i16 %k, 1
   br i1 %0, label %bb, label %bb1
 
-bb: ; preds = %entry
+bb:
   tail call void @foo(i16 zeroext %1) nounwind
   ret i16 %1
 
-bb1: ; preds = %entry
+bb1:
   ret i16 %1
 }
 
-define zeroext i16 @t2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+define zeroext i16 @test2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+; X64-LABEL: test2:
+; X64: ## %bb.0: ## %entry
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: decl %eax
+; X64-NEXT: cmpw %di, %si
+; X64-NEXT: jne LBB1_2
+; X64-NEXT: ## %bb.1: ## %bb
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq _foo
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+; X64-NEXT: LBB1_2: ## %bb1
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test2:
+; X32: ## %bb.0: ## %entry
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: decl %eax
+; X32-NEXT: cmpw {{[0-9]+}}(%esp), %cx
+; X32-NEXT: jne LBB1_2
+; X32-NEXT: ## %bb.1: ## %bb
+; X32-NEXT: movzwl %ax, %esi
+; X32-NEXT: movl %esi, (%esp)
+; X32-NEXT: calll _foo
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: jmp LBB1_3
+; X32-NEXT: LBB1_2: ## %bb1
+; X32-NEXT: movzwl %ax, %eax
+; X32-NEXT: LBB1_3: ## %bb1
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
 entry:
-; 32BIT-LABEL: t2:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal -1(%eax), %ecx
-
-; 64BIT-LABEL: t2:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: movl %esi, %eax
-; 64BIT: movzwl %ax
-  %0 = icmp eq i16 %k, %c ; <i1> [#uses=1]
-  %1 = add i16 %k, -1 ; <i16> [#uses=3]
+  %0 = icmp eq i16 %k, %c
+  %1 = add i16 %k, -1
   br i1 %0, label %bb, label %bb1
 
-bb: ; preds = %entry
+bb:
   tail call void @foo(i16 zeroext %1) nounwind
   ret i16 %1
 
-bb1: ; preds = %entry
+bb1:
   ret i16 %1
 }
 
 declare void @foo(i16 zeroext)
 
-define zeroext i16 @t3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+define zeroext i16 @test3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+; X64-LABEL: test3:
+; X64: ## %bb.0: ## %entry
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: addl $2, %eax
+; X64-NEXT: cmpw %di, %si
+; X64-NEXT: jne LBB2_2
+; X64-NEXT: ## %bb.1: ## %bb
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq _foo
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+; X64-NEXT: LBB2_2: ## %bb1
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test3:
+; X32: ## %bb.0: ## %entry
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: addl $2, %eax
+; X32-NEXT: cmpw {{[0-9]+}}(%esp), %cx
+; X32-NEXT: jne LBB2_2
+; X32-NEXT: ## %bb.1: ## %bb
+; X32-NEXT: movzwl %ax, %esi
+; X32-NEXT: movl %esi, (%esp)
+; X32-NEXT: calll _foo
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: jmp LBB2_3
+; X32-NEXT: LBB2_2: ## %bb1
+; X32-NEXT: movzwl %ax, %eax
+; X32-NEXT: LBB2_3: ## %bb1
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
 entry:
-; 32BIT-LABEL: t3:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal 2(%eax), %ecx
-
-; 64BIT-LABEL: t3:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: movl %esi, %eax
-  %0 = add i16 %k, 2 ; <i16> [#uses=3]
-  %1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
+  %0 = add i16 %k, 2
+  %1 = icmp eq i16 %k, %c
   br i1 %1, label %bb, label %bb1
 
-bb: ; preds = %entry
+bb:
   tail call void @foo(i16 zeroext %0) nounwind
   ret i16 %0
 
-bb1: ; preds = %entry
+bb1:
   ret i16 %0
 }
 
-define zeroext i16 @t4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
+; X64-LABEL: test4:
+; X64: ## %bb.0: ## %entry
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: addl %edi, %eax
+; X64-NEXT: cmpw %di, %si
+; X64-NEXT: jne LBB3_2
+; X64-NEXT: ## %bb.1: ## %bb
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movl %ebx, %edi
+; X64-NEXT: callq _foo
+; X64-NEXT: movl %ebx, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+; X64-NEXT: LBB3_2: ## %bb1
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: retq
+;
+; X32-LABEL: test4:
+; X32: ## %bb.0: ## %entry
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: cmpw %cx, %dx
+; X32-NEXT: jne LBB3_2
+; X32-NEXT: ## %bb.1: ## %bb
+; X32-NEXT: movzwl %ax, %esi
+; X32-NEXT: movl %esi, (%esp)
+; X32-NEXT: calll _foo
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: jmp LBB3_3
+; X32-NEXT: LBB3_2: ## %bb1
+; X32-NEXT: movzwl %ax, %eax
+; X32-NEXT: LBB3_3: ## %bb1
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
 entry:
-; 32BIT-LABEL: t4:
-; 32BIT: movw 16(%esp), %ax
-; 32BIT: movw 20(%esp), %cx
-; 32BIT-NOT: movw %cx, %dx
-; 32BIT: leal (%ecx,%eax), %edx
-
-; 64BIT-LABEL: t4:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: movl %esi, %eax
-  %0 = add i16 %k, %c ; <i16> [#uses=3]
-  %1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
+  %0 = add i16 %k, %c
+  %1 = icmp eq i16 %k, %c
   br i1 %1, label %bb, label %bb1
 
-bb: ; preds = %entry
+bb:
   tail call void @foo(i16 zeroext %0) nounwind
   ret i16 %0
 
-bb1: ; preds = %entry
+bb1:
   ret i16 %0
 }
+
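
For reference, the new assertions are the output of the utility named in the
NOTE line above, and the two RUN lines show which llc invocations are being
checked. A minimal sketch of how checks like these are typically regenerated,
assuming a built llc is reachable on PATH and the commands are run from the
LLVM source root (this exact invocation is not recorded in the patch):

  # Inspect the generated assembly for each target, mirroring the RUN lines:
  $ llc < llvm/test/CodeGen/X86/3addr-16bit.ll -mtriple=x86_64-apple-darwin
  $ llc < llvm/test/CodeGen/X86/3addr-16bit.ll -mtriple=i686-apple-darwin

  # Rewrite the FileCheck assertions in the test file from current llc output:
  $ llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/3addr-16bit.ll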