diff --git a/llvm/test/CodeGen/X86/fast-isel-constpool.ll b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
index 4b8f387571e9b..706674c584baf 100644
--- a/llvm/test/CodeGen/X86/fast-isel-constpool.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
@@ -6,6 +6,10 @@
 ; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=small -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX
 ; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=large -mattr=avx512f < %s | FileCheck %s --check-prefix=LARGE_AVX
 
+; The large code model shouldn't mean anything on 32-bit x86, but it currently
+; generates 64-bit-only instructions and will assert in the encoder.
+; RUN: llc -mtriple=i686-apple-darwin -fast-isel -code-model=large -mattr=sse2 < %s | FileCheck %s --check-prefix=X86-LARGE
+
 ; Make sure fast isel uses rip-relative addressing for the small code model.
 define float @constpool_float(float %x) {
 ; CHECK-LABEL: constpool_float:
@@ -31,6 +35,17 @@ define float @constpool_float(float %x) {
 ; LARGE_AVX-NEXT: movabsq $LCPI0_0, %rax
 ; LARGE_AVX-NEXT: vaddss (%rax), %xmm0, %xmm0
 ; LARGE_AVX-NEXT: retq
+;
+; X86-LARGE-LABEL: constpool_float:
+; X86-LARGE: ## %bb.0:
+; X86-LARGE-NEXT: pushl %eax
+; X86-LARGE-NEXT: .cfi_def_cfa_offset 8
+; X86-LARGE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-LARGE-NEXT: addss LCPI0_0, %xmm0
+; X86-LARGE-NEXT: movss %xmm0, (%esp)
+; X86-LARGE-NEXT: flds (%esp)
+; X86-LARGE-NEXT: popl %eax
+; X86-LARGE-NEXT: retl
   %1 = fadd float %x, 16.50e+01
   ret float %1
 
@@ -60,7 +75,107 @@ define double @constpool_double(double %x) nounwind {
 ; LARGE_AVX-NEXT: movabsq $LCPI1_0, %rax
 ; LARGE_AVX-NEXT: vaddsd (%rax), %xmm0, %xmm0
 ; LARGE_AVX-NEXT: retq
+;
+; X86-LARGE-LABEL: constpool_double:
+; X86-LARGE: ## %bb.0:
+; X86-LARGE-NEXT: subl $12, %esp
+; X86-LARGE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-LARGE-NEXT: addsd LCPI1_0, %xmm0
+; X86-LARGE-NEXT: movsd %xmm0, (%esp)
+; X86-LARGE-NEXT: fldl (%esp)
+; X86-LARGE-NEXT: addl $12, %esp
+; X86-LARGE-NEXT: retl
   %1 = fadd double %x, 8.500000e-01
   ret double %1
 }
+
+define void @constpool_float_no_fp_args(float* %x) nounwind {
+; CHECK-LABEL: constpool_float_no_fp_args:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: addss (%rdi), %xmm0
+; CHECK-NEXT: movss %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; LARGE-LABEL: constpool_float_no_fp_args:
+; LARGE: ## %bb.0:
+; LARGE-NEXT: movabsq $LCPI2_0, %rax
+; LARGE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; LARGE-NEXT: addss (%rdi), %xmm0
+; LARGE-NEXT: movss %xmm0, (%rdi)
+; LARGE-NEXT: retq
+;
+; AVX-LABEL: constpool_float_no_fp_args:
+; AVX: ## %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vaddss (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovss %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; LARGE_AVX-LABEL: constpool_float_no_fp_args:
+; LARGE_AVX: ## %bb.0:
+; LARGE_AVX-NEXT: movabsq $LCPI2_0, %rax
+; LARGE_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; LARGE_AVX-NEXT: vaddss (%rdi), %xmm0, %xmm0
+; LARGE_AVX-NEXT: vmovss %xmm0, (%rdi)
+; LARGE_AVX-NEXT: retq
+;
+; X86-LARGE-LABEL: constpool_float_no_fp_args:
+; X86-LARGE: ## %bb.0:
+; X86-LARGE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-LARGE-NEXT: movabsq $LCPI2_0, %rcx
+; X86-LARGE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-LARGE-NEXT: addss (%eax), %xmm0
+; X86-LARGE-NEXT: movss %xmm0, (%eax)
+; X86-LARGE-NEXT: retl
+  %a = load float, float* %x
+  %b = fadd float %a, 16.50e+01
+  store float %b, float* %x
+  ret void
+}
+
+define void @constpool_double_no_fp_args(double* %x) nounwind {
+; CHECK-LABEL: constpool_double_no_fp_args:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: addsd (%rdi), %xmm0
+; CHECK-NEXT: movsd %xmm0, (%rdi)
+; CHECK-NEXT: retq
+;
+; LARGE-LABEL: constpool_double_no_fp_args:
+; LARGE: ## %bb.0:
+; LARGE-NEXT: movabsq $LCPI3_0, %rax
+; LARGE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; LARGE-NEXT: addsd (%rdi), %xmm0
+; LARGE-NEXT: movsd %xmm0, (%rdi)
+; LARGE-NEXT: retq
+;
+; AVX-LABEL: constpool_double_no_fp_args:
+; AVX: ## %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vaddsd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovsd %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; LARGE_AVX-LABEL: constpool_double_no_fp_args:
+; LARGE_AVX: ## %bb.0:
+; LARGE_AVX-NEXT: movabsq $LCPI3_0, %rax
+; LARGE_AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; LARGE_AVX-NEXT: vaddsd (%rdi), %xmm0, %xmm0
+; LARGE_AVX-NEXT: vmovsd %xmm0, (%rdi)
+; LARGE_AVX-NEXT: retq
+;
+; X86-LARGE-LABEL: constpool_double_no_fp_args:
+; X86-LARGE: ## %bb.0:
+; X86-LARGE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-LARGE-NEXT: movabsq $LCPI3_0, %rcx
+; X86-LARGE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-LARGE-NEXT: addsd (%eax), %xmm0
+; X86-LARGE-NEXT: movsd %xmm0, (%eax)
+; X86-LARGE-NEXT: retl
+  %a = load double, double* %x
+  %b = fadd double %a, 8.500000e-01
+  store double %b, double* %x
+  ret void
+}
 