From f541b9eedb614cfdcf763ceb60892c86c154967d Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 12 Nov 2025 10:07:56 +0000 Subject: [PATCH] [X86] bitcnt-big-integer.ll - add zero_undef test coverage --- llvm/test/CodeGen/X86/bitcnt-big-integer.ll | 2841 +++++++++++++++++-- 1 file changed, 2668 insertions(+), 173 deletions(-) diff --git a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll index 13149d78b16fb..fe3fbf141682a 100644 --- a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll +++ b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll @@ -1806,15 +1806,2518 @@ define i32 @load_ctlz_i1024(ptr %p0) nounwind { ret i32 %res } +; +; CTLZ_ZERO_UNDEF +; + +define i32 @test_ctlz_undef_i128(i128 %a0) nounwind { +; SSE-LABEL: test_ctlz_undef_i128: +; SSE: # %bb.0: +; SSE-NEXT: bsrq %rsi, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: bsrq %rdi, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: test_ctlz_undef_i128: +; AVX2: # %bb.0: +; AVX2-NEXT: lzcntq %rsi, %rcx +; AVX2-NEXT: lzcntq %rdi, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_ctlz_undef_i128: +; AVX512: # %bb.0: +; AVX512-NEXT: lzcntq %rsi, %rcx +; AVX512-NEXT: lzcntq %rdi, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 -1) + %res = trunc i128 %cnt to i32 + ret i32 %res +} + +define i32 @load_ctlz_undef_i128(ptr %p0) nounwind { +; SSE-LABEL: load_ctlz_undef_i128: +; SSE: # %bb.0: +; SSE-NEXT: movq 8(%rdi), %rcx +; SSE-NEXT: bsrq %rcx, %rdx +; SSE-NEXT: xorl $63, %edx +; SSE-NEXT: bsrq (%rdi), %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: load_ctlz_undef_i128: +; AVX2: # %bb.0: +; AVX2-NEXT: movq 8(%rdi), %rcx +; AVX2-NEXT: lzcntq %rcx, %rdx +; AVX2-NEXT: lzcntq (%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %edx, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_ctlz_undef_i128: +; AVX512: # %bb.0: +; AVX512-NEXT: movq 8(%rdi), %rcx +; AVX512-NEXT: lzcntq %rcx, %rdx +; AVX512-NEXT: lzcntq (%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %edx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %a0 = load i128, ptr %p0 + %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 -1) + %res = trunc i128 %cnt to i32 + ret i32 %res +} + +define i32 @test_ctlz_undef_i256(i256 %a0) nounwind { +; SSE-LABEL: test_ctlz_undef_i256: +; SSE: # %bb.0: +; SSE-NEXT: bsrq %rcx, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rdx, %r8 +; SSE-NEXT: xorl $63, %r8d +; SSE-NEXT: orl $64, %r8d +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %eax, %r8d +; SSE-NEXT: bsrq %rsi, %r9 +; SSE-NEXT: xorl $63, %r9d +; SSE-NEXT: bsrq %rdi, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: cmovnel %r9d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq 
%rcx, %rdx +; SSE-NEXT: cmovnel %r8d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: test_ctlz_undef_i256: +; AVX2: # %bb.0: +; AVX2-NEXT: lzcntq %rcx, %rax +; AVX2-NEXT: lzcntq %rdx, %r8 +; AVX2-NEXT: addl $64, %r8d +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %eax, %r8d +; AVX2-NEXT: lzcntq %rsi, %r9 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdi, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %r9d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rcx, %rdx +; AVX2-NEXT: cmovnel %r8d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_ctlz_undef_i256: +; AVX512: # %bb.0: +; AVX512-NEXT: lzcntq %rcx, %rax +; AVX512-NEXT: lzcntq %rdx, %r8 +; AVX512-NEXT: addl $64, %r8d +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %eax, %r8d +; AVX512-NEXT: lzcntq %rsi, %r9 +; AVX512-NEXT: lzcntq %rdi, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %r9d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rcx, %rdx +; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 -1) + %res = trunc i256 %cnt to i32 + ret i32 %res +} + +define i32 @load_ctlz_undef_i256(ptr %p0) nounwind { +; SSE-LABEL: load_ctlz_undef_i256: +; SSE: # %bb.0: +; SSE-NEXT: movq 8(%rdi), %rdx +; SSE-NEXT: movq 16(%rdi), %rcx +; SSE-NEXT: movq 24(%rdi), %rsi +; SSE-NEXT: bsrq %rsi, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rcx, %r8 +; SSE-NEXT: xorl $63, %r8d +; SSE-NEXT: orl $64, %r8d +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: cmovnel %eax, %r8d +; SSE-NEXT: bsrq %rdx, %r9 +; SSE-NEXT: xorl $63, %r9d +; SSE-NEXT: bsrq (%rdi), %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %r9d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %rsi, %rcx +; SSE-NEXT: cmovnel %r8d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: load_ctlz_undef_i256: +; AVX2: # %bb.0: +; AVX2-NEXT: movq 16(%rdi), %rcx +; AVX2-NEXT: movq 24(%rdi), %rdx +; AVX2-NEXT: lzcntq %rdx, %rax +; AVX2-NEXT: lzcntq %rcx, %rsi +; AVX2-NEXT: addl $64, %esi +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %esi +; AVX2-NEXT: movq 8(%rdi), %r8 +; AVX2-NEXT: lzcntq %r8, %r9 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq (%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %r9d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rdx, %rcx +; AVX2-NEXT: cmovnel %esi, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_ctlz_undef_i256: +; AVX512: # %bb.0: +; AVX512-NEXT: movq 8(%rdi), %rcx +; AVX512-NEXT: movq 16(%rdi), %rdx +; AVX512-NEXT: movq 24(%rdi), %rsi +; AVX512-NEXT: lzcntq %rsi, %rax +; AVX512-NEXT: lzcntq %rdx, %r8 +; AVX512-NEXT: addl $64, %r8d +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %eax, %r8d +; AVX512-NEXT: lzcntq %rcx, %r9 +; AVX512-NEXT: lzcntq (%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %r9d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rsi, %rdx +; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %a0 = load i256, ptr %p0 + %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 
-1) + %res = trunc i256 %cnt to i32 + ret i32 %res +} + +define i32 @test_ctlz_undef_i512(i512 %a0) nounwind { +; SSE-LABEL: test_ctlz_undef_i512: +; SSE: # %bb.0: +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; SSE-NEXT: bsrq %r11, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %r10, %r14 +; SSE-NEXT: xorl $63, %r14d +; SSE-NEXT: orl $64, %r14d +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %eax, %r14d +; SSE-NEXT: bsrq %r9, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %r8, %rbx +; SSE-NEXT: xorl $63, %ebx +; SSE-NEXT: orl $64, %ebx +; SSE-NEXT: testq %r9, %r9 +; SSE-NEXT: cmovnel %eax, %ebx +; SSE-NEXT: subl $-128, %ebx +; SSE-NEXT: movq %r10, %rax +; SSE-NEXT: orq %r11, %rax +; SSE-NEXT: cmovnel %r14d, %ebx +; SSE-NEXT: bsrq %rcx, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rdx, %r14 +; SSE-NEXT: xorl $63, %r14d +; SSE-NEXT: orl $64, %r14d +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %eax, %r14d +; SSE-NEXT: bsrq %rsi, %r15 +; SSE-NEXT: xorl $63, %r15d +; SSE-NEXT: bsrq %rdi, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %rcx, %rdx +; SSE-NEXT: cmovnel %r14d, %eax +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r11, %r9 +; SSE-NEXT: orq %r10, %r8 +; SSE-NEXT: orq %r9, %r8 +; SSE-NEXT: cmovnel %ebx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: retq +; +; AVX2-LABEL: test_ctlz_undef_i512: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; AVX2-NEXT: lzcntq %r11, %rax +; AVX2-NEXT: xorl %r14d, %r14d +; AVX2-NEXT: lzcntq %r10, %r14 +; AVX2-NEXT: addl $64, %r14d +; AVX2-NEXT: testq %r11, %r11 +; AVX2-NEXT: cmovnel %eax, %r14d +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %r9, %rax +; AVX2-NEXT: xorl %ebx, %ebx +; AVX2-NEXT: lzcntq %r8, %rbx +; AVX2-NEXT: addl $64, %ebx +; AVX2-NEXT: testq %r9, %r9 +; AVX2-NEXT: cmovnel %eax, %ebx +; AVX2-NEXT: subl $-128, %ebx +; AVX2-NEXT: movq %r10, %rax +; AVX2-NEXT: orq %r11, %rax +; AVX2-NEXT: cmovnel %r14d, %ebx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rcx, %rax +; AVX2-NEXT: xorl %r14d, %r14d +; AVX2-NEXT: lzcntq %rdx, %r14 +; AVX2-NEXT: addl $64, %r14d +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %eax, %r14d +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: lzcntq %rsi, %r15 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdi, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %r15d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rcx, %rdx +; AVX2-NEXT: cmovnel %r14d, %eax +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r11, %r9 +; AVX2-NEXT: orq %r10, %r8 +; AVX2-NEXT: orq %r9, %r8 +; AVX2-NEXT: cmovnel %ebx, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_ctlz_undef_i512: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; AVX512-NEXT: lzcntq %r11, %rax +; AVX512-NEXT: lzcntq %r10, %r14 +; AVX512-NEXT: addl $64, 
%r14d +; AVX512-NEXT: testq %r11, %r11 +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: lzcntq %r9, %rax +; AVX512-NEXT: lzcntq %r8, %rbx +; AVX512-NEXT: addl $64, %ebx +; AVX512-NEXT: testq %r9, %r9 +; AVX512-NEXT: cmovnel %eax, %ebx +; AVX512-NEXT: subl $-128, %ebx +; AVX512-NEXT: movq %r10, %rax +; AVX512-NEXT: orq %r11, %rax +; AVX512-NEXT: cmovnel %r14d, %ebx +; AVX512-NEXT: lzcntq %rcx, %rax +; AVX512-NEXT: lzcntq %rdx, %r14 +; AVX512-NEXT: addl $64, %r14d +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: lzcntq %rsi, %r15 +; AVX512-NEXT: lzcntq %rdi, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %r15d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rcx, %rdx +; AVX512-NEXT: cmovnel %r14d, %eax +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r11, %r9 +; AVX512-NEXT: orq %r10, %r8 +; AVX512-NEXT: orq %r9, %r8 +; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: retq + %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 -1) + %res = trunc i512 %cnt to i32 + ret i32 %res +} + +define i32 @load_ctlz_undef_i512(ptr %p0) nounwind { +; SSE-LABEL: load_ctlz_undef_i512: +; SSE: # %bb.0: +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq 8(%rdi), %r11 +; SSE-NEXT: movq 16(%rdi), %r9 +; SSE-NEXT: movq 24(%rdi), %r10 +; SSE-NEXT: movq 32(%rdi), %rcx +; SSE-NEXT: movq 40(%rdi), %rdx +; SSE-NEXT: movq 48(%rdi), %rsi +; SSE-NEXT: movq 56(%rdi), %r8 +; SSE-NEXT: bsrq %r8, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rsi, %r14 +; SSE-NEXT: xorl $63, %r14d +; SSE-NEXT: orl $64, %r14d +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %eax, %r14d +; SSE-NEXT: bsrq %rdx, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rcx, %rbx +; SSE-NEXT: xorl $63, %ebx +; SSE-NEXT: orl $64, %ebx +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %ebx +; SSE-NEXT: subl $-128, %ebx +; SSE-NEXT: movq %rsi, %rax +; SSE-NEXT: orq %r8, %rax +; SSE-NEXT: cmovnel %r14d, %ebx +; SSE-NEXT: bsrq %r10, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %r9, %r14 +; SSE-NEXT: xorl $63, %r14d +; SSE-NEXT: orl $64, %r14d +; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: cmovnel %eax, %r14d +; SSE-NEXT: bsrq %r11, %r15 +; SSE-NEXT: xorl $63, %r15d +; SSE-NEXT: bsrq (%rdi), %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r10, %r9 +; SSE-NEXT: cmovnel %r14d, %eax +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r8, %rdx +; SSE-NEXT: orq %rsi, %rcx +; SSE-NEXT: orq %rdx, %rcx +; SSE-NEXT: cmovnel %ebx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: retq +; +; AVX2-LABEL: load_ctlz_undef_i512: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq 8(%rdi), %r10 +; AVX2-NEXT: movq 16(%rdi), %r9 +; AVX2-NEXT: movq 32(%rdi), %rcx +; AVX2-NEXT: movq 40(%rdi), %rdx +; AVX2-NEXT: movq 48(%rdi), %rsi +; AVX2-NEXT: movq 56(%rdi), %r8 +; AVX2-NEXT: lzcntq %r8, %rax +; AVX2-NEXT: xorl %ebx, %ebx +; AVX2-NEXT: lzcntq %rsi, %rbx +; AVX2-NEXT: addl $64, %ebx +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %eax, %ebx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdx, %rax +; 
AVX2-NEXT: lzcntq %rcx, %r11 +; AVX2-NEXT: addl $64, %r11d +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %r11d +; AVX2-NEXT: subl $-128, %r11d +; AVX2-NEXT: movq %rsi, %rax +; AVX2-NEXT: orq %r8, %rax +; AVX2-NEXT: cmovnel %ebx, %r11d +; AVX2-NEXT: movq 24(%rdi), %rbx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rbx, %rax +; AVX2-NEXT: xorl %r14d, %r14d +; AVX2-NEXT: lzcntq %r9, %r14 +; AVX2-NEXT: addl $64, %r14d +; AVX2-NEXT: testq %rbx, %rbx +; AVX2-NEXT: cmovnel %eax, %r14d +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: lzcntq %r10, %r15 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq (%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %r15d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rbx, %r9 +; AVX2-NEXT: cmovnel %r14d, %eax +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r8, %rdx +; AVX2-NEXT: orq %rsi, %rcx +; AVX2-NEXT: orq %rdx, %rcx +; AVX2-NEXT: cmovnel %r11d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_ctlz_undef_i512: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq 8(%rdi), %r11 +; AVX512-NEXT: movq 16(%rdi), %r9 +; AVX512-NEXT: movq 24(%rdi), %r10 +; AVX512-NEXT: movq 32(%rdi), %rcx +; AVX512-NEXT: movq 40(%rdi), %rdx +; AVX512-NEXT: movq 48(%rdi), %rsi +; AVX512-NEXT: movq 56(%rdi), %r8 +; AVX512-NEXT: lzcntq %r8, %rax +; AVX512-NEXT: lzcntq %rsi, %r14 +; AVX512-NEXT: addl $64, %r14d +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: lzcntq %rdx, %rax +; AVX512-NEXT: lzcntq %rcx, %rbx +; AVX512-NEXT: addl $64, %ebx +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %ebx +; AVX512-NEXT: subl $-128, %ebx +; AVX512-NEXT: movq %rsi, %rax +; AVX512-NEXT: orq %r8, %rax +; AVX512-NEXT: cmovnel %r14d, %ebx +; AVX512-NEXT: lzcntq %r10, %rax +; AVX512-NEXT: lzcntq %r9, %r14 +; AVX512-NEXT: addl $64, %r14d +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: lzcntq (%rdi), %rax +; AVX512-NEXT: lzcntq %r11, %rdi +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r11, %r11 +; AVX512-NEXT: cmovnel %edi, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %r10, %r9 +; AVX512-NEXT: cmovnel %r14d, %eax +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r8, %rdx +; AVX512-NEXT: orq %rsi, %rcx +; AVX512-NEXT: orq %rdx, %rcx +; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: retq + %a0 = load i512, ptr %p0 + %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 -1) + %res = trunc i512 %cnt to i32 + ret i32 %res +} + +define i32 @test_ctlz_undef_i1024(i1024 %a0) nounwind { +; SSE-LABEL: test_ctlz_undef_i1024: +; SSE: # %bb.0: +; SSE-NEXT: pushq %rbp +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %r13 +; SSE-NEXT: pushq %r12 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq %r9, %r12 +; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; 
SSE-NEXT: bsrq %r11, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %rsi, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: orl $64, %ecx +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: bsrq %rdx, %r10 +; SSE-NEXT: xorl $63, %r10d +; SSE-NEXT: bsrq {{[0-9]+}}(%rsp), %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %r10d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: movq %rsi, %r9 +; SSE-NEXT: movq %rsi, %rbx +; SSE-NEXT: orq %r11, %r9 +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: bsrq %r15, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: bsrq %r13, %rsi +; SSE-NEXT: xorl $63, %esi +; SSE-NEXT: orl $64, %esi +; SSE-NEXT: testq %r15, %r15 +; SSE-NEXT: cmovnel %ecx, %esi +; SSE-NEXT: bsrq %r14, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r9 +; SSE-NEXT: bsrq %r9, %rbp +; SSE-NEXT: xorl $63, %ebp +; SSE-NEXT: orl $64, %ebp +; SSE-NEXT: testq %r14, %r14 +; SSE-NEXT: cmovnel %ecx, %ebp +; SSE-NEXT: movq %r8, %r10 +; SSE-NEXT: subl $-128, %ebp +; SSE-NEXT: movq %r13, %rcx +; SSE-NEXT: orq %r15, %rcx +; SSE-NEXT: cmovnel %esi, %ebp +; SSE-NEXT: addl $256, %ebp # imm = 0x100 +; SSE-NEXT: orq %r11, %rdx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SSE-NEXT: orq %rbx, %rsi +; SSE-NEXT: orq %rdx, %rsi +; SSE-NEXT: cmovnel %eax, %ebp +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: bsrq %rdx, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; SSE-NEXT: bsrq %r8, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: bsrq %r12, %rsi +; SSE-NEXT: xorl $63, %esi +; SSE-NEXT: bsrq %r10, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: orl $64, %ecx +; SSE-NEXT: testq %r12, %r12 +; SSE-NEXT: cmovnel %esi, %ecx +; SSE-NEXT: movq %rdi, %rbx +; SSE-NEXT: subl $-128, %ecx +; SSE-NEXT: movq %r8, %rsi +; SSE-NEXT: orq %rdx, %rsi +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload +; SSE-NEXT: bsrq %r11, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload +; SSE-NEXT: bsrq %r8, %rdx +; SSE-NEXT: xorl $63, %edx +; SSE-NEXT: orl $64, %edx +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %eax, %edx +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload +; SSE-NEXT: bsrq %rdi, %rsi +; SSE-NEXT: xorl $63, %esi +; SSE-NEXT: bsrq %rbx, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %esi, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r11, %r8 +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r12 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10 +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r12, %r10 +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r15 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r14 +; SSE-NEXT: orq %r15, %r14 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r13 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r9 +; SSE-NEXT: orq %r13, %r9 +; SSE-NEXT: addl $512, %eax # imm = 0x200 +; SSE-NEXT: orq %r14, %r9 +; SSE-NEXT: cmovnel %ebp, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r12 +; SSE-NEXT: popq %r13 +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: popq %rbp +; SSE-NEXT: retq +; +; AVX2-LABEL: test_ctlz_undef_i1024: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; 
AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq %r9, %r14 +; AVX2-NEXT: movq %r8, %r11 +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %r12, %rcx +; AVX2-NEXT: xorl %r9d, %r9d +; AVX2-NEXT: lzcntq %r8, %r9 +; AVX2-NEXT: addl $64, %r9d +; AVX2-NEXT: testq %r12, %r12 +; AVX2-NEXT: cmovnel %ecx, %r9d +; AVX2-NEXT: xorl %esi, %esi +; AVX2-NEXT: lzcntq %r10, %rsi +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %rax, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %esi, %ecx +; AVX2-NEXT: subl $-128, %ecx +; AVX2-NEXT: movq %r8, %rsi +; AVX2-NEXT: orq %r12, %rsi +; AVX2-NEXT: cmovnel %r9d, %ecx +; AVX2-NEXT: xorl %edi, %edi +; AVX2-NEXT: lzcntq %rbx, %rdi +; AVX2-NEXT: xorl %esi, %esi +; AVX2-NEXT: lzcntq %r15, %rsi +; AVX2-NEXT: addl $64, %esi +; AVX2-NEXT: testq %rbx, %rbx +; AVX2-NEXT: cmovnel %edi, %esi +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; AVX2-NEXT: xorl %ebp, %ebp +; AVX2-NEXT: lzcntq %r13, %rbp +; AVX2-NEXT: addl $64, %ebp +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r9 +; AVX2-NEXT: xorl %edi, %edi +; AVX2-NEXT: lzcntq %r9, %rdi +; AVX2-NEXT: testq %r9, %r9 +; AVX2-NEXT: cmovnel %edi, %ebp +; AVX2-NEXT: subl $-128, %ebp +; AVX2-NEXT: movq %r15, %rdi +; AVX2-NEXT: orq %rbx, %rdi +; AVX2-NEXT: cmovnel %esi, %ebp +; AVX2-NEXT: addl $256, %ebp # imm = 0x100 +; AVX2-NEXT: movq %r10, %rdi +; AVX2-NEXT: orq %r12, %rdi +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: orq %r8, %rsi +; AVX2-NEXT: orq %rdi, %rsi +; AVX2-NEXT: cmovnel %ecx, %ebp +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdi, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %r12, %rcx +; AVX2-NEXT: testq %r12, %r12 +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %r11, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: xorl %esi, %esi +; AVX2-NEXT: lzcntq %r14, %rsi +; AVX2-NEXT: testq %r14, %r14 +; AVX2-NEXT: cmovnel %esi, %ecx +; AVX2-NEXT: subl $-128, %ecx +; AVX2-NEXT: movq %rdi, %rsi +; AVX2-NEXT: orq %r12, %rsi +; AVX2-NEXT: cmovnel %eax, %ecx +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: lzcntq %rdx, %rdx +; AVX2-NEXT: addl $64, %edx +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %r10, %rax +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %eax, %edx +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: lzcntq %rax, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: lzcntq %rsi, %r8 +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %r8d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %r10, %rdi +; AVX2-NEXT: cmovnel %edx, %eax +; AVX2-NEXT: orq %r12, %r14 +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11 +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r14, %r11 +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %rbx +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r9 +; AVX2-NEXT: orq %rbx, %r9 
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r15 +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13 +; AVX2-NEXT: orq %r15, %r13 +; AVX2-NEXT: addl $512, %eax # imm = 0x200 +; AVX2-NEXT: orq %r9, %r13 +; AVX2-NEXT: cmovnel %ebp, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_ctlz_undef_i1024: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq %r9, %r14 +; AVX512-NEXT: movq %r8, %r11 +; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX512-NEXT: lzcntq %r12, %rcx +; AVX512-NEXT: lzcntq %r8, %r9 +; AVX512-NEXT: addl $64, %r9d +; AVX512-NEXT: testq %r12, %r12 +; AVX512-NEXT: cmovnel %ecx, %r9d +; AVX512-NEXT: lzcntq %r10, %rsi +; AVX512-NEXT: lzcntq %rax, %rcx +; AVX512-NEXT: addl $64, %ecx +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %esi, %ecx +; AVX512-NEXT: subl $-128, %ecx +; AVX512-NEXT: movq %r8, %rsi +; AVX512-NEXT: orq %r12, %rsi +; AVX512-NEXT: cmovnel %r9d, %ecx +; AVX512-NEXT: lzcntq %rbx, %rdi +; AVX512-NEXT: lzcntq %r15, %rsi +; AVX512-NEXT: addl $64, %esi +; AVX512-NEXT: testq %rbx, %rbx +; AVX512-NEXT: cmovnel %edi, %esi +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; AVX512-NEXT: lzcntq %r13, %rbp +; AVX512-NEXT: addl $64, %ebp +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9 +; AVX512-NEXT: lzcntq %r9, %rdi +; AVX512-NEXT: testq %r9, %r9 +; AVX512-NEXT: cmovnel %edi, %ebp +; AVX512-NEXT: subl $-128, %ebp +; AVX512-NEXT: movq %r15, %rdi +; AVX512-NEXT: orq %rbx, %rdi +; AVX512-NEXT: cmovnel %esi, %ebp +; AVX512-NEXT: addl $256, %ebp # imm = 0x100 +; AVX512-NEXT: movq %r10, %rdi +; AVX512-NEXT: orq %r12, %rdi +; AVX512-NEXT: movq %rax, %rsi +; AVX512-NEXT: orq %r8, %rsi +; AVX512-NEXT: orq %rdi, %rsi +; AVX512-NEXT: cmovnel %ecx, %ebp +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi +; AVX512-NEXT: lzcntq %rdi, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX512-NEXT: lzcntq %r12, %rcx +; AVX512-NEXT: testq %r12, %r12 +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: lzcntq %r11, %rcx +; AVX512-NEXT: addl $64, %ecx +; AVX512-NEXT: lzcntq %r14, %rsi +; AVX512-NEXT: testq %r14, %r14 +; AVX512-NEXT: cmovnel %esi, %ecx +; AVX512-NEXT: subl $-128, %ecx +; AVX512-NEXT: movq %rdi, %rsi +; AVX512-NEXT: orq %r12, %rsi +; AVX512-NEXT: cmovnel %eax, %ecx +; AVX512-NEXT: movq %rdx, %rdi +; AVX512-NEXT: lzcntq %rdx, %rdx +; AVX512-NEXT: addl $64, %edx +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload +; AVX512-NEXT: lzcntq %r10, %rax +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %eax, %edx +; AVX512-NEXT: lzcntq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX512-NEXT: lzcntq %rsi, %r8 +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: subl 
$-128, %eax +; AVX512-NEXT: orq %r10, %rdi +; AVX512-NEXT: cmovnel %edx, %eax +; AVX512-NEXT: orq %r12, %r14 +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11 +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r14, %r11 +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r9 +; AVX512-NEXT: orq %rbx, %r9 +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r15 +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13 +; AVX512-NEXT: orq %r15, %r13 +; AVX512-NEXT: addl $512, %eax # imm = 0x200 +; AVX512-NEXT: orq %r9, %r13 +; AVX512-NEXT: cmovnel %ebp, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 -1) + %res = trunc i1024 %cnt to i32 + ret i32 %res +} + +define i32 @load_ctlz_undef_i1024(ptr %p0) nounwind { +; SSE-LABEL: load_ctlz_undef_i1024: +; SSE: # %bb.0: +; SSE-NEXT: pushq %rbp +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %r13 +; SSE-NEXT: pushq %r12 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq 40(%rdi), %rbp +; SSE-NEXT: movq 64(%rdi), %rbx +; SSE-NEXT: movq 72(%rdi), %r11 +; SSE-NEXT: movq 80(%rdi), %r12 +; SSE-NEXT: movq 88(%rdi), %r14 +; SSE-NEXT: movq 96(%rdi), %r13 +; SSE-NEXT: movq 104(%rdi), %r9 +; SSE-NEXT: movq 112(%rdi), %r10 +; SSE-NEXT: movq 120(%rdi), %r8 +; SSE-NEXT: bsrq %r8, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %r10, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: orl $64, %ecx +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: bsrq %r9, %rdx +; SSE-NEXT: xorl $63, %edx +; SSE-NEXT: bsrq %r13, %rax +; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %r9, %r9 +; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: movq %r10, %rdx +; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: orq %r8, %rdx +; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: bsrq %r14, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: movq %r12, %rsi +; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: bsrq %r12, %rdx +; SSE-NEXT: xorl $63, %edx +; SSE-NEXT: orl $64, %edx +; SSE-NEXT: testq %r14, %r14 +; SSE-NEXT: cmovnel %ecx, %edx +; SSE-NEXT: bsrq %r11, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: bsrq %rbx, %r15 +; SSE-NEXT: xorl $63, %r15d +; SSE-NEXT: orl $64, %r15d +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %ecx, %r15d +; SSE-NEXT: movq 48(%rdi), %r12 +; SSE-NEXT: subl $-128, %r15d +; SSE-NEXT: movq %rsi, %rcx +; SSE-NEXT: orq %r14, %rcx +; SSE-NEXT: cmovnel %edx, %r15d +; SSE-NEXT: addl $256, %r15d # imm = 0x100 +; SSE-NEXT: movq %r9, %rcx +; SSE-NEXT: orq %r8, %rcx +; SSE-NEXT: movq %r13, %rdx +; SSE-NEXT: orq %r10, %rdx +; SSE-NEXT: orq %rcx, %rdx +; SSE-NEXT: movq 56(%rdi), %r13 +; SSE-NEXT: cmovnel %eax, %r15d +; SSE-NEXT: bsrq %r13, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: bsrq %r12, %rdx +; SSE-NEXT: xorl $63, %edx +; SSE-NEXT: orl $64, %edx +; SSE-NEXT: testq %r13, %r13 +; SSE-NEXT: cmovnel %eax, %edx +; SSE-NEXT: movq %rbp, %r10 +; SSE-NEXT: bsrq %rbp, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: movq 32(%rdi), %r8 +; SSE-NEXT: bsrq %r8, %rbp +; SSE-NEXT: xorl $63, %ebp 
+; SSE-NEXT: orl $64, %ebp +; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: cmovnel %eax, %ebp +; SSE-NEXT: subl $-128, %ebp +; SSE-NEXT: movq %r12, %rax +; SSE-NEXT: orq %r13, %rax +; SSE-NEXT: cmovnel %edx, %ebp +; SSE-NEXT: movq 24(%rdi), %r9 +; SSE-NEXT: bsrq %r9, %rax +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: movq 16(%rdi), %rsi +; SSE-NEXT: bsrq %rsi, %rcx +; SSE-NEXT: xorl $63, %ecx +; SSE-NEXT: orl $64, %ecx +; SSE-NEXT: testq %r9, %r9 +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: movq 8(%rdi), %rdx +; SSE-NEXT: bsrq (%rdi), %rax +; SSE-NEXT: bsrq %rdx, %rdi +; SSE-NEXT: xorl $63, %edi +; SSE-NEXT: xorl $63, %eax +; SSE-NEXT: orl $64, %eax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %edi, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r9, %rsi +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: orq %r13, %r10 +; SSE-NEXT: orq %r12, %r8 +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r10, %r8 +; SSE-NEXT: cmovnel %ebp, %eax +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; SSE-NEXT: orq %r14, %r11 +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload +; SSE-NEXT: orq %rcx, %rbx +; SSE-NEXT: addl $512, %eax # imm = 0x200 +; SSE-NEXT: orq %r11, %rbx +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r12 +; SSE-NEXT: popq %r13 +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: popq %rbp +; SSE-NEXT: retq +; +; AVX2-LABEL: load_ctlz_undef_i1024: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq 48(%rdi), %r9 +; AVX2-NEXT: movq 56(%rdi), %rbp +; AVX2-NEXT: movq 64(%rdi), %r11 +; AVX2-NEXT: movq 72(%rdi), %r10 +; AVX2-NEXT: movq 80(%rdi), %r14 +; AVX2-NEXT: movq 88(%rdi), %rbx +; AVX2-NEXT: movq 96(%rdi), %rdx +; AVX2-NEXT: movq 104(%rdi), %r8 +; AVX2-NEXT: movq 112(%rdi), %rsi +; AVX2-NEXT: movq 120(%rdi), %r15 +; AVX2-NEXT: lzcntq %r15, %rax +; AVX2-NEXT: lzcntq %rsi, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: testq %r15, %r15 +; AVX2-NEXT: cmovnel %eax, %ecx +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: lzcntq %r8, %r12 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdx, %rax +; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: cmovnel %r12d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: movq %rsi, %r12 +; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: orq %r15, %r12 +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %rbx, %rcx +; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: xorl %r13d, %r13d +; AVX2-NEXT: lzcntq %r14, %r13 +; AVX2-NEXT: addl $64, %r13d +; AVX2-NEXT: testq %rbx, %rbx +; AVX2-NEXT: cmovnel %ecx, %r13d +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %r10, %rcx +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: lzcntq %r11, %r12 +; AVX2-NEXT: addl $64, %r12d +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %ecx, %r12d +; AVX2-NEXT: subl $-128, %r12d +; AVX2-NEXT: movq %r14, %rcx +; AVX2-NEXT: orq %rbx, %rcx +; AVX2-NEXT: cmovnel %r13d, %r12d +; 
AVX2-NEXT: addl $256, %r12d # imm = 0x100 +; AVX2-NEXT: movq %r8, %rcx +; AVX2-NEXT: orq %r15, %rcx +; AVX2-NEXT: orq %rsi, %rdx +; AVX2-NEXT: orq %rcx, %rdx +; AVX2-NEXT: cmovnel %eax, %r12d +; AVX2-NEXT: movq %rbp, %r14 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %rbp, %rcx +; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %r9, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rbp, %rbp +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: movq 32(%rdi), %r13 +; AVX2-NEXT: xorl %ebp, %ebp +; AVX2-NEXT: lzcntq %r13, %rbp +; AVX2-NEXT: addl $64, %ebp +; AVX2-NEXT: movq 40(%rdi), %r8 +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: lzcntq %r8, %rdx +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %edx, %ebp +; AVX2-NEXT: subl $-128, %ebp +; AVX2-NEXT: movq %r9, %rdx +; AVX2-NEXT: orq %r14, %rdx +; AVX2-NEXT: cmovnel %eax, %ebp +; AVX2-NEXT: movq 16(%rdi), %r9 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: lzcntq %r9, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: movq 24(%rdi), %rdx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq %rdx, %rax +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %ecx +; AVX2-NEXT: movq 8(%rdi), %rsi +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: lzcntq (%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: lzcntq %rsi, %rdi +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %edi, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rdx, %r9 +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: orq %r14, %r8 +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r8, %r13 +; AVX2-NEXT: cmovnel %ebp, %eax +; AVX2-NEXT: orq %r15, %rbx +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload +; AVX2-NEXT: orq %rbx, %r10 +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; AVX2-NEXT: orq %rcx, %r11 +; AVX2-NEXT: addl $512, %eax # imm = 0x200 +; AVX2-NEXT: orq %r10, %r11 +; AVX2-NEXT: cmovnel %r12d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_ctlz_undef_i1024: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq 32(%rdi), %r14 +; AVX512-NEXT: movq 48(%rdi), %rbp +; AVX512-NEXT: movq 64(%rdi), %r11 +; AVX512-NEXT: movq 72(%rdi), %r10 +; AVX512-NEXT: movq 80(%rdi), %rdx +; AVX512-NEXT: movq 88(%rdi), %rbx +; AVX512-NEXT: movq 96(%rdi), %rsi +; AVX512-NEXT: movq 104(%rdi), %r9 +; AVX512-NEXT: movq 112(%rdi), %r8 +; AVX512-NEXT: movq 120(%rdi), %r15 +; AVX512-NEXT: lzcntq %r15, %rax +; AVX512-NEXT: lzcntq %r8, %rcx +; AVX512-NEXT: addl $64, %ecx +; AVX512-NEXT: testq %r15, %r15 +; AVX512-NEXT: cmovnel %eax, %ecx +; AVX512-NEXT: lzcntq %r9, %r12 +; AVX512-NEXT: lzcntq %rsi, %rax +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r9, %r9 +; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: movq %r8, %r12 +; AVX512-NEXT: movq %r8, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: orq %r15, %r12 +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: lzcntq %rbx, %rcx +; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: lzcntq %rdx, %r13 +; AVX512-NEXT: addl $64, %r13d +; AVX512-NEXT: testq %rbx, %rbx +; AVX512-NEXT: cmovnel %ecx, %r13d +; AVX512-NEXT: lzcntq %r10, %rcx +; AVX512-NEXT: lzcntq %r11, %r12 +; AVX512-NEXT: addl $64, %r12d +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %ecx, %r12d +; AVX512-NEXT: subl $-128, %r12d +; AVX512-NEXT: movq %rdx, %rcx +; AVX512-NEXT: orq %rbx, %rcx +; AVX512-NEXT: cmovnel %r13d, %r12d +; AVX512-NEXT: addl $256, %r12d # imm = 0x100 +; AVX512-NEXT: movq %r9, %rcx +; AVX512-NEXT: orq %r15, %rcx +; AVX512-NEXT: orq %r8, %rsi +; AVX512-NEXT: orq %rcx, %rsi +; AVX512-NEXT: movq 56(%rdi), %r13 +; AVX512-NEXT: cmovnel %eax, %r12d +; AVX512-NEXT: lzcntq %r13, %rcx +; AVX512-NEXT: movq %rbp, %rsi +; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: lzcntq %rbp, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r13, %r13 +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: lzcntq %r14, %rbp +; AVX512-NEXT: addl $64, %ebp +; AVX512-NEXT: movq 40(%rdi), %r8 +; AVX512-NEXT: lzcntq %r8, %rdx +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %edx, %ebp +; AVX512-NEXT: subl $-128, %ebp +; AVX512-NEXT: movq %rsi, %rdx +; AVX512-NEXT: orq %r13, %rdx +; AVX512-NEXT: cmovnel %eax, %ebp +; AVX512-NEXT: movq 16(%rdi), %r9 +; AVX512-NEXT: lzcntq %r9, %rcx +; AVX512-NEXT: addl $64, %ecx +; AVX512-NEXT: movq 24(%rdi), %rdx +; AVX512-NEXT: lzcntq %rdx, %rax +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %ecx +; AVX512-NEXT: movq 8(%rdi), %rsi +; AVX512-NEXT: lzcntq (%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: lzcntq %rsi, %rdi +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %edi, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rdx, %r9 +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: orq %r13, %r8 +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r8, %r14 +; AVX512-NEXT: cmovnel %ebp, %eax +; AVX512-NEXT: orq %r15, %rbx +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload +; AVX512-NEXT: orq %rbx, %r10 +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; AVX512-NEXT: orq %rcx, %r11 +; AVX512-NEXT: addl $512, %eax # imm = 0x200 +; AVX512-NEXT: orq %r10, %r11 +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %a0 = load i1024, ptr %p0 + %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 -1) + %res = trunc i1024 %cnt to i32 + ret i32 %res +} + ; ; CTTZ ; -define i32 @test_cttz_i128(i128 %a0) nounwind { -; SSE-LABEL: test_cttz_i128: +define i32 @test_cttz_i128(i128 %a0) nounwind { +; SSE-LABEL: test_cttz_i128: +; SSE: # %bb.0: +; SSE-NEXT: rep bsfq %rdi, %rcx +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq %rsi, %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; 
AVX2-LABEL: test_cttz_i128: +; AVX2: # %bb.0: +; AVX2-NEXT: tzcntq %rdi, %rcx +; AVX2-NEXT: tzcntq %rsi, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rdi, %rdi +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_cttz_i128: +; AVX512: # %bb.0: +; AVX512-NEXT: tzcntq %rdi, %rcx +; AVX512-NEXT: tzcntq %rsi, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rdi, %rdi +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0) + %res = trunc i128 %cnt to i32 + ret i32 %res +} + +define i32 @load_cttz_i128(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_i128: +; SSE: # %bb.0: +; SSE-NEXT: movq (%rdi), %rcx +; SSE-NEXT: rep bsfq %rcx, %rdx +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq 8(%rdi), %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: load_cttz_i128: +; AVX2: # %bb.0: +; AVX2-NEXT: movq (%rdi), %rcx +; AVX2-NEXT: tzcntq %rcx, %rdx +; AVX2-NEXT: tzcntq 8(%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %edx, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_cttz_i128: +; AVX512: # %bb.0: +; AVX512-NEXT: movq (%rdi), %rcx +; AVX512-NEXT: tzcntq %rcx, %rdx +; AVX512-NEXT: tzcntq 8(%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %edx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %a0 = load i128, ptr %p0 + %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0) + %res = trunc i128 %cnt to i32 + ret i32 %res +} + +define i32 @test_cttz_i256(i256 %a0) nounwind { +; SSE-LABEL: test_cttz_i256: +; SSE: # %bb.0: +; SSE-NEXT: rep bsfq %rdi, %rax +; SSE-NEXT: rep bsfq %rsi, %r8 +; SSE-NEXT: addl $64, %r8d +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %eax, %r8d +; SSE-NEXT: rep bsfq %rdx, %r9 +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq %rcx, %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %r9d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %rsi, %rdi +; SSE-NEXT: cmovnel %r8d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: test_cttz_i256: +; AVX2: # %bb.0: +; AVX2-NEXT: tzcntq %rdi, %rax +; AVX2-NEXT: tzcntq %rsi, %r8 +; AVX2-NEXT: addl $64, %r8d +; AVX2-NEXT: testq %rdi, %rdi +; AVX2-NEXT: cmovnel %eax, %r8d +; AVX2-NEXT: tzcntq %rdx, %r9 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rcx, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %r9d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rsi, %rdi +; AVX2-NEXT: cmovnel %r8d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_cttz_i256: +; AVX512: # %bb.0: +; AVX512-NEXT: tzcntq %rdi, %rax +; AVX512-NEXT: tzcntq %rsi, %r8 +; AVX512-NEXT: addl $64, %r8d +; AVX512-NEXT: testq %rdi, %rdi +; AVX512-NEXT: cmovnel %eax, %r8d +; AVX512-NEXT: tzcntq %rdx, %r9 +; AVX512-NEXT: tzcntq %rcx, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %r9d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rsi, %rdi +; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax 
+; AVX512-NEXT: retq + %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0) + %res = trunc i256 %cnt to i32 + ret i32 %res +} + +define i32 @load_cttz_i256(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_i256: +; SSE: # %bb.0: +; SSE-NEXT: movq 16(%rdi), %rcx +; SSE-NEXT: movq (%rdi), %rdx +; SSE-NEXT: movq 8(%rdi), %rsi +; SSE-NEXT: rep bsfq %rdx, %rax +; SSE-NEXT: rep bsfq %rsi, %r8 +; SSE-NEXT: addl $64, %r8d +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %r8d +; SSE-NEXT: rep bsfq %rcx, %r9 +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq 24(%rdi), %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %r9d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %rsi, %rdx +; SSE-NEXT: cmovnel %r8d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: retq +; +; AVX2-LABEL: load_cttz_i256: +; AVX2: # %bb.0: +; AVX2-NEXT: movq (%rdi), %rcx +; AVX2-NEXT: movq 8(%rdi), %rdx +; AVX2-NEXT: tzcntq %rcx, %rax +; AVX2-NEXT: tzcntq %rdx, %rsi +; AVX2-NEXT: addl $64, %esi +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %eax, %esi +; AVX2-NEXT: movq 16(%rdi), %r8 +; AVX2-NEXT: tzcntq %r8, %r9 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq 24(%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %r9d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rdx, %rcx +; AVX2-NEXT: cmovnel %esi, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_cttz_i256: +; AVX512: # %bb.0: +; AVX512-NEXT: movq 16(%rdi), %rcx +; AVX512-NEXT: movq (%rdi), %rdx +; AVX512-NEXT: movq 8(%rdi), %rsi +; AVX512-NEXT: tzcntq %rdx, %rax +; AVX512-NEXT: tzcntq %rsi, %r8 +; AVX512-NEXT: addl $64, %r8d +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %r8d +; AVX512-NEXT: tzcntq %rcx, %r9 +; AVX512-NEXT: tzcntq 24(%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %r9d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %rsi, %rdx +; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: retq + %a0 = load i256, ptr %p0 + %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0) + %res = trunc i256 %cnt to i32 + ret i32 %res +} + +define i32 @test_cttz_i512(i512 %a0) nounwind { +; SSE-LABEL: test_cttz_i512: +; SSE: # %bb.0: +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: rep bsfq %rdi, %rax +; SSE-NEXT: rep bsfq %rsi, %r11 +; SSE-NEXT: addl $64, %r11d +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %eax, %r11d +; SSE-NEXT: rep bsfq %rdx, %rax +; SSE-NEXT: rep bsfq %rcx, %r10 +; SSE-NEXT: addl $64, %r10d +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %r10d +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx +; SSE-NEXT: subl $-128, %r10d +; SSE-NEXT: movq %rdi, %rax +; SSE-NEXT: orq %rsi, %rax +; SSE-NEXT: cmovnel %r11d, %r10d +; SSE-NEXT: rep bsfq %r8, %rax +; SSE-NEXT: rep bsfq %r9, %r11 +; SSE-NEXT: addl $64, %r11d +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %eax, %r11d +; SSE-NEXT: rep bsfq %rbx, %r14 +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %rbx, %rbx +; SSE-NEXT: cmovnel %r14d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r9, %r8 +; SSE-NEXT: cmovnel %r11d, %eax +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %rcx, %rsi +; SSE-NEXT: orq %rdx, %rdi +; SSE-NEXT: orq %rsi, %rdi +; SSE-NEXT: cmovnel %r10d, %eax +; SSE-NEXT: # kill: def $eax killed $eax 
killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r14 +; SSE-NEXT: retq +; +; AVX2-LABEL: test_cttz_i512: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: tzcntq %rdi, %rax +; AVX2-NEXT: tzcntq %rsi, %r11 +; AVX2-NEXT: addl $64, %r11d +; AVX2-NEXT: testq %rdi, %rdi +; AVX2-NEXT: cmovnel %eax, %r11d +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rdx, %rax +; AVX2-NEXT: tzcntq %rcx, %r10 +; AVX2-NEXT: addl $64, %r10d +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %r10d +; AVX2-NEXT: subl $-128, %r10d +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: orq %rsi, %rax +; AVX2-NEXT: cmovnel %r11d, %r10d +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %r8, %rax +; AVX2-NEXT: xorl %ebx, %ebx +; AVX2-NEXT: tzcntq %r9, %rbx +; AVX2-NEXT: addl $64, %ebx +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %eax, %ebx +; AVX2-NEXT: xorl %r14d, %r14d +; AVX2-NEXT: tzcntq %r11, %r14 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r11, %r11 +; AVX2-NEXT: cmovnel %r14d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %r9, %r8 +; AVX2-NEXT: cmovnel %ebx, %eax +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %rcx, %rsi +; AVX2-NEXT: orq %rdx, %rdi +; AVX2-NEXT: orq %rsi, %rdi +; AVX2-NEXT: cmovnel %r10d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_cttz_i512: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; AVX512-NEXT: tzcntq %rdi, %rax +; AVX512-NEXT: tzcntq %rsi, %rbx +; AVX512-NEXT: addl $64, %ebx +; AVX512-NEXT: testq %rdi, %rdi +; AVX512-NEXT: cmovnel %eax, %ebx +; AVX512-NEXT: tzcntq %rdx, %rax +; AVX512-NEXT: tzcntq %rcx, %r10 +; AVX512-NEXT: addl $64, %r10d +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %r10d +; AVX512-NEXT: subl $-128, %r10d +; AVX512-NEXT: movq %rdi, %rax +; AVX512-NEXT: orq %rsi, %rax +; AVX512-NEXT: cmovnel %ebx, %r10d +; AVX512-NEXT: tzcntq %r8, %rax +; AVX512-NEXT: tzcntq %r9, %rbx +; AVX512-NEXT: addl $64, %ebx +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %eax, %ebx +; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax +; AVX512-NEXT: tzcntq %r11, %r14 +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r11, %r11 +; AVX512-NEXT: cmovnel %r14d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %r9, %r8 +; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %rcx, %rsi +; AVX512-NEXT: orq %rdx, %rdi +; AVX512-NEXT: orq %rsi, %rdi +; AVX512-NEXT: cmovnel %r10d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: retq + %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) + %res = trunc i512 %cnt to i32 + ret i32 %res +} + +define i32 @load_cttz_i512(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_i512: +; SSE: # %bb.0: +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq 48(%rdi), %r10 +; SSE-NEXT: movq 40(%rdi), %r9 +; SSE-NEXT: movq 24(%rdi), %r8 +; SSE-NEXT: movq 16(%rdi), %rdx +; SSE-NEXT: movq (%rdi), %rcx +; SSE-NEXT: movq 8(%rdi), %rsi +; SSE-NEXT: rep bsfq %rcx, %rax +; SSE-NEXT: rep bsfq %rsi, %rbx +; SSE-NEXT: addl $64, %ebx +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %eax, %ebx +; SSE-NEXT: rep bsfq %rdx, %rax +; SSE-NEXT: rep 
bsfq %r8, %r11 +; SSE-NEXT: addl $64, %r11d +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %r11d +; SSE-NEXT: movq 32(%rdi), %r14 +; SSE-NEXT: subl $-128, %r11d +; SSE-NEXT: movq %rcx, %rax +; SSE-NEXT: orq %rsi, %rax +; SSE-NEXT: cmovnel %ebx, %r11d +; SSE-NEXT: rep bsfq %r14, %rax +; SSE-NEXT: rep bsfq %r9, %rbx +; SSE-NEXT: addl $64, %ebx +; SSE-NEXT: testq %r14, %r14 +; SSE-NEXT: cmovnel %eax, %ebx +; SSE-NEXT: rep bsfq %r10, %r15 +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq 56(%rdi), %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r9, %r14 +; SSE-NEXT: cmovnel %ebx, %eax +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r8, %rsi +; SSE-NEXT: orq %rdx, %rcx +; SSE-NEXT: orq %rsi, %rcx +; SSE-NEXT: cmovnel %r11d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: retq +; +; AVX2-LABEL: load_cttz_i512: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq 48(%rdi), %r10 +; AVX2-NEXT: movq 40(%rdi), %r9 +; AVX2-NEXT: movq 24(%rdi), %r8 +; AVX2-NEXT: movq 16(%rdi), %rdx +; AVX2-NEXT: movq (%rdi), %rcx +; AVX2-NEXT: movq 8(%rdi), %rsi +; AVX2-NEXT: tzcntq %rcx, %rax +; AVX2-NEXT: xorl %ebx, %ebx +; AVX2-NEXT: tzcntq %rsi, %rbx +; AVX2-NEXT: addl $64, %ebx +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %eax, %ebx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rdx, %rax +; AVX2-NEXT: tzcntq %r8, %r11 +; AVX2-NEXT: addl $64, %r11d +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %r11d +; AVX2-NEXT: subl $-128, %r11d +; AVX2-NEXT: movq %rcx, %rax +; AVX2-NEXT: orq %rsi, %rax +; AVX2-NEXT: cmovnel %ebx, %r11d +; AVX2-NEXT: movq 32(%rdi), %rbx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rbx, %rax +; AVX2-NEXT: xorl %r14d, %r14d +; AVX2-NEXT: tzcntq %r9, %r14 +; AVX2-NEXT: addl $64, %r14d +; AVX2-NEXT: testq %rbx, %rbx +; AVX2-NEXT: cmovnel %eax, %r14d +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: tzcntq %r10, %r15 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq 56(%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %r15d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %r9, %rbx +; AVX2-NEXT: cmovnel %r14d, %eax +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r8, %rsi +; AVX2-NEXT: orq %rdx, %rcx +; AVX2-NEXT: orq %rsi, %rcx +; AVX2-NEXT: cmovnel %r11d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_cttz_i512: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq 48(%rdi), %r11 +; AVX512-NEXT: movq 40(%rdi), %r9 +; AVX512-NEXT: movq 32(%rdi), %r10 +; AVX512-NEXT: movq 24(%rdi), %r8 +; AVX512-NEXT: movq 16(%rdi), %rdx +; AVX512-NEXT: movq (%rdi), %rcx +; AVX512-NEXT: movq 8(%rdi), %rsi +; AVX512-NEXT: tzcntq %rcx, %rax +; AVX512-NEXT: tzcntq %rsi, %r14 +; AVX512-NEXT: addl $64, %r14d +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: tzcntq %rdx, %rax +; AVX512-NEXT: tzcntq %r8, %rbx +; AVX512-NEXT: addl $64, %ebx +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %ebx +; AVX512-NEXT: subl $-128, %ebx +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: orq %rsi, %rax +; AVX512-NEXT: cmovnel %r14d, %ebx +; AVX512-NEXT: tzcntq %r10, 
%rax +; AVX512-NEXT: tzcntq %r9, %r14 +; AVX512-NEXT: addl $64, %r14d +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %eax, %r14d +; AVX512-NEXT: tzcntq 56(%rdi), %rax +; AVX512-NEXT: tzcntq %r11, %rdi +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r11, %r11 +; AVX512-NEXT: cmovnel %edi, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %r9, %r10 +; AVX512-NEXT: cmovnel %r14d, %eax +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r8, %rsi +; AVX512-NEXT: orq %rdx, %rcx +; AVX512-NEXT: orq %rsi, %rcx +; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: retq + %a0 = load i512, ptr %p0 + %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) + %res = trunc i512 %cnt to i32 + ret i32 %res +} + +define i32 @test_cttz_i1024(i1024 %a0) nounwind { +; SSE-LABEL: test_cttz_i1024: +; SSE: # %bb.0: +; SSE-NEXT: pushq %rbp +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %r13 +; SSE-NEXT: pushq %r12 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq %r9, %r13 +; SSE-NEXT: movq %r8, %r14 +; SSE-NEXT: movq %rcx, %rbx +; SSE-NEXT: movq %rdx, %r10 +; SSE-NEXT: movq %rsi, %r9 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: rep bsfq %rdi, %rax +; SSE-NEXT: rep bsfq %r9, %r15 +; SSE-NEXT: addl $64, %r15d +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %eax, %r15d +; SSE-NEXT: rep bsfq %r10, %r12 +; SSE-NEXT: rep bsfq %rcx, %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: cmovnel %r12d, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: movq %rdi, %r12 +; SSE-NEXT: orq %r9, %r12 +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: rep bsfq %r8, %r15 +; SSE-NEXT: movq %r13, %rcx +; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: rep bsfq %r13, %r13 +; SSE-NEXT: addl $64, %r13d +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %r15d, %r13d +; SSE-NEXT: rep bsfq %rdx, %r12 +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r15 +; SSE-NEXT: addl $64, %r15d +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %r12d, %r15d +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; SSE-NEXT: subl $-128, %r15d +; SSE-NEXT: movq %r8, %rbp +; SSE-NEXT: orq %rcx, %rbp +; SSE-NEXT: cmovnel %r13d, %r15d +; SSE-NEXT: addl $256, %r15d # imm = 0x100 +; SSE-NEXT: movq %r9, %r13 +; SSE-NEXT: orq %rbx, %r13 +; SSE-NEXT: movq %rdi, %rbp +; SSE-NEXT: orq %r10, %rbp +; SSE-NEXT: orq %r13, %rbp +; SSE-NEXT: cmovnel %eax, %r15d +; SSE-NEXT: rep bsfq %r11, %r13 +; SSE-NEXT: rep bsfq %r12, %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %r11, %r11 +; SSE-NEXT: cmovnel %r13d, %eax +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r13 +; SSE-NEXT: addl $64, %r13d +; SSE-NEXT: rep bsfq %rsi, %rcx +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: cmovnel %ecx, %r13d +; SSE-NEXT: subl $-128, %r13d +; SSE-NEXT: movq %r11, %rcx +; SSE-NEXT: orq %r12, %rcx +; SSE-NEXT: cmovnel %eax, %r13d +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbp +; SSE-NEXT: rep bsfq %rbp, %rcx +; SSE-NEXT: addl $64, %ecx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: rep bsfq %rdx, %rax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; SSE-NEXT: rep bsfq %r8, %rsi +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %esi, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %rbp, 
%rdx +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r12 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r11 +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r12, %r11 +; SSE-NEXT: cmovnel %r13d, %eax +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload +; SSE-NEXT: orq %rbx, %r9 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10 +; SSE-NEXT: orq %r14, %rdi +; SSE-NEXT: orq %r10, %rdi +; SSE-NEXT: addl $512, %eax # imm = 0x200 +; SSE-NEXT: orq %r9, %rdi +; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r12 +; SSE-NEXT: popq %r13 +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: popq %rbp +; SSE-NEXT: retq +; +; AVX2-LABEL: test_cttz_i1024: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq %r9, %rbx +; AVX2-NEXT: movq %r8, %r14 +; AVX2-NEXT: movq %rcx, %r11 +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: movq %rsi, %r9 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX2-NEXT: tzcntq %rdi, %rax +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: tzcntq %r9, %r15 +; AVX2-NEXT: addl $64, %r15d +; AVX2-NEXT: testq %rdi, %rdi +; AVX2-NEXT: cmovnel %eax, %r15d +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: tzcntq %r10, %r12 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %r11, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %r12d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: movq %rdi, %r12 +; AVX2-NEXT: orq %r9, %r12 +; AVX2-NEXT: cmovnel %r15d, %eax +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: tzcntq %r14, %r15 +; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: tzcntq %rbx, %r12 +; AVX2-NEXT: addl $64, %r12d +; AVX2-NEXT: testq %r14, %r14 +; AVX2-NEXT: cmovnel %r15d, %r12d +; AVX2-NEXT: xorl %r13d, %r13d +; AVX2-NEXT: tzcntq %rcx, %r13 +; AVX2-NEXT: xorl %r15d, %r15d +; AVX2-NEXT: tzcntq %rdx, %r15 +; AVX2-NEXT: addl $64, %r15d +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %r13d, %r15d +; AVX2-NEXT: subl $-128, %r15d +; AVX2-NEXT: movq %r14, %r13 +; AVX2-NEXT: orq %rbx, %r13 +; AVX2-NEXT: cmovnel %r12d, %r15d +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX2-NEXT: addl $256, %r15d # imm = 0x100 +; AVX2-NEXT: movq %r9, %r13 +; AVX2-NEXT: orq %r11, %r13 +; AVX2-NEXT: movq %rdi, %rbp +; AVX2-NEXT: orq %r10, %rbp +; AVX2-NEXT: orq %r13, %rbp +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; AVX2-NEXT: cmovnel %eax, %r15d +; AVX2-NEXT: xorl %ebp, %ebp +; AVX2-NEXT: tzcntq %r12, %rbp +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %r13, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r12, %r12 +; AVX2-NEXT: cmovnel %ebp, %eax +; AVX2-NEXT: xorl %ebp, %ebp +; AVX2-NEXT: tzcntq %r8, %rbp +; AVX2-NEXT: addl $64, %ebp +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: tzcntq %rsi, %rcx +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %ecx, %ebp +; AVX2-NEXT: subl $-128, %ebp +; AVX2-NEXT: movq %r12, %rcx +; AVX2-NEXT: orq %r13, %rcx +; AVX2-NEXT: cmovnel %eax, %ebp +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: tzcntq %rbx, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rdx, 
%rax +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %ecx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; AVX2-NEXT: tzcntq %r8, %rsi +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %esi, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %rbx, %rdx +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13 +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r12 +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r13, %r12 +; AVX2-NEXT: cmovnel %ebp, %eax +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11 +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload +; AVX2-NEXT: orq %r11, %r9 +; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r10 +; AVX2-NEXT: orq %r14, %rdi +; AVX2-NEXT: orq %r10, %rdi +; AVX2-NEXT: addl $512, %eax # imm = 0x200 +; AVX2-NEXT: orq %r9, %rdi +; AVX2-NEXT: cmovnel %r15d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_cttz_i1024: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq %r9, %r14 +; AVX512-NEXT: movq %r8, %r15 +; AVX512-NEXT: movq %rcx, %r11 +; AVX512-NEXT: movq %rdx, %r10 +; AVX512-NEXT: movq %rsi, %r9 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; AVX512-NEXT: tzcntq %rdi, %rax +; AVX512-NEXT: tzcntq %r9, %r12 +; AVX512-NEXT: addl $64, %r12d +; AVX512-NEXT: testq %rdi, %rdi +; AVX512-NEXT: cmovnel %eax, %r12d +; AVX512-NEXT: tzcntq %rdx, %r13 +; AVX512-NEXT: tzcntq %r11, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %r13d, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: movq %rdi, %r13 +; AVX512-NEXT: orq %r9, %r13 +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: tzcntq %r8, %r12 +; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: tzcntq %r14, %r13 +; AVX512-NEXT: addl $64, %r13d +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %r12d, %r13d +; AVX512-NEXT: tzcntq %rcx, %rbp +; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %r12 +; AVX512-NEXT: addl $64, %r12d +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %ebp, %r12d +; AVX512-NEXT: subl $-128, %r12d +; AVX512-NEXT: movq %r8, %rbp +; AVX512-NEXT: orq %r14, %rbp +; AVX512-NEXT: cmovnel %r13d, %r12d +; AVX512-NEXT: addl $256, %r12d # imm = 0x100 +; AVX512-NEXT: movq %r9, %r13 +; AVX512-NEXT: orq %r11, %r13 +; AVX512-NEXT: movq %rdi, %rbp +; AVX512-NEXT: orq %rdx, %rbp +; AVX512-NEXT: orq %r13, %rbp +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; AVX512-NEXT: cmovnel %eax, %r12d +; AVX512-NEXT: tzcntq %rbx, %rbp +; AVX512-NEXT: tzcntq %r13, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rbx, %rbx +; AVX512-NEXT: cmovnel %ebp, %eax +; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rbp +; AVX512-NEXT: addl $64, %ebp +; AVX512-NEXT: tzcntq %rsi, %rcx +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %ecx, %ebp +; AVX512-NEXT: subl $-128, %ebp +; AVX512-NEXT: movq %rbx, %rcx +; AVX512-NEXT: orq %r13, %rcx +; AVX512-NEXT: cmovnel %eax, %ebp +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; AVX512-NEXT: tzcntq %r14, %rcx +; AVX512-NEXT: addl $64, %ecx +; 
AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX512-NEXT: tzcntq %rdx, %rax +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %ecx +; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8 +; AVX512-NEXT: tzcntq %r8, %rsi +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %esi, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %r14, %rdx +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13 +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r13, %rbx +; AVX512-NEXT: cmovnel %ebp, %eax +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11 +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload +; AVX512-NEXT: orq %r11, %r9 +; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r10 +; AVX512-NEXT: orq %r15, %rdi +; AVX512-NEXT: orq %r10, %rdi +; AVX512-NEXT: addl $512, %eax # imm = 0x200 +; AVX512-NEXT: orq %r9, %rdi +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0) + %res = trunc i1024 %cnt to i32 + ret i32 %res +} + +define i32 @load_cttz_i1024(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_i1024: +; SSE: # %bb.0: +; SSE-NEXT: pushq %rbp +; SSE-NEXT: pushq %r15 +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %r13 +; SSE-NEXT: pushq %r12 +; SSE-NEXT: pushq %rbx +; SSE-NEXT: movq 88(%rdi), %r10 +; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 56(%rdi), %rcx +; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 40(%rdi), %rsi +; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 24(%rdi), %r9 +; SSE-NEXT: movq 16(%rdi), %r15 +; SSE-NEXT: movq (%rdi), %r8 +; SSE-NEXT: movq 8(%rdi), %r11 +; SSE-NEXT: rep bsfq %r8, %rax +; SSE-NEXT: rep bsfq %r11, %rdx +; SSE-NEXT: addl $64, %edx +; SSE-NEXT: testq %r8, %r8 +; SSE-NEXT: cmovnel %eax, %edx +; SSE-NEXT: rep bsfq %r15, %rbx +; SSE-NEXT: rep bsfq %r9, %rax +; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %r15, %r15 +; SSE-NEXT: cmovnel %ebx, %eax +; SSE-NEXT: movq 32(%rdi), %rbx +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: movq %r8, %r14 +; SSE-NEXT: orq %r11, %r14 +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: rep bsfq %rbx, %rdx +; SSE-NEXT: rep bsfq %rsi, %r12 +; SSE-NEXT: addl $64, %r12d +; SSE-NEXT: testq %rbx, %rbx +; SSE-NEXT: cmovnel %edx, %r12d +; SSE-NEXT: movq 48(%rdi), %r13 +; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: rep bsfq %r13, %rdx +; SSE-NEXT: rep bsfq %rcx, %r14 +; SSE-NEXT: addl $64, %r14d +; SSE-NEXT: testq %r13, %r13 +; SSE-NEXT: cmovnel %edx, %r14d +; SSE-NEXT: subl $-128, %r14d +; SSE-NEXT: movq %rbx, %rdx +; SSE-NEXT: orq %rsi, %rdx +; SSE-NEXT: cmovnel %r12d, %r14d +; SSE-NEXT: movq 72(%rdi), %r12 +; SSE-NEXT: addl $256, %r14d # imm = 0x100 +; SSE-NEXT: movq %r11, %rdx +; SSE-NEXT: orq %r9, %rdx +; SSE-NEXT: movq %r8, %r13 +; SSE-NEXT: orq %r15, %r13 +; SSE-NEXT: orq %rdx, %r13 +; SSE-NEXT: movq 64(%rdi), %r13 +; SSE-NEXT: cmovnel %eax, %r14d +; SSE-NEXT: rep bsfq %r13, %rdx +; SSE-NEXT: rep bsfq %r12, %rax +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: testq %r13, %r13 +; SSE-NEXT: cmovnel %edx, %eax +; SSE-NEXT: rep bsfq %r10, %rbp +; 
SSE-NEXT: addl $64, %ebp +; SSE-NEXT: movq 80(%rdi), %r10 +; SSE-NEXT: rep bsfq %r10, %rcx +; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: cmovnel %ecx, %ebp +; SSE-NEXT: subl $-128, %ebp +; SSE-NEXT: movq %r13, %rcx +; SSE-NEXT: orq %r12, %rcx +; SSE-NEXT: cmovnel %eax, %ebp +; SSE-NEXT: movq 104(%rdi), %r9 +; SSE-NEXT: rep bsfq %r9, %rcx +; SSE-NEXT: addl $64, %ecx +; SSE-NEXT: movq 96(%rdi), %rdx +; SSE-NEXT: rep bsfq %rdx, %rax +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %eax, %ecx +; SSE-NEXT: movl $64, %eax +; SSE-NEXT: rep bsfq 120(%rdi), %rax +; SSE-NEXT: movq 112(%rdi), %rdi +; SSE-NEXT: addl $64, %eax +; SSE-NEXT: rep bsfq %rdi, %rsi +; SSE-NEXT: testq %rdi, %rdi +; SSE-NEXT: cmovnel %esi, %eax +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: orq %r9, %rdx +; SSE-NEXT: cmovnel %ecx, %eax +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload +; SSE-NEXT: orq %r10, %r13 +; SSE-NEXT: addl $256, %eax # imm = 0x100 +; SSE-NEXT: orq %r12, %r13 +; SSE-NEXT: cmovnel %ebp, %eax +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; SSE-NEXT: orq %rcx, %r11 +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload +; SSE-NEXT: orq %rbx, %r8 +; SSE-NEXT: orq %r15, %r8 +; SSE-NEXT: addl $512, %eax # imm = 0x200 +; SSE-NEXT: orq %r11, %r8 +; SSE-NEXT: cmovnel %r14d, %eax +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r12 +; SSE-NEXT: popq %r13 +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %r15 +; SSE-NEXT: popq %rbp +; SSE-NEXT: retq +; +; AVX2-LABEL: load_cttz_i1024: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq 72(%rdi), %r14 +; AVX2-NEXT: movq 64(%rdi), %r15 +; AVX2-NEXT: movq 56(%rdi), %r9 +; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq 48(%rdi), %rcx +; AVX2-NEXT: movq 40(%rdi), %r10 +; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq 32(%rdi), %rsi +; AVX2-NEXT: movq 24(%rdi), %rbp +; AVX2-NEXT: movq 16(%rdi), %rbx +; AVX2-NEXT: movq (%rdi), %r8 +; AVX2-NEXT: movq 8(%rdi), %r11 +; AVX2-NEXT: tzcntq %r8, %rax +; AVX2-NEXT: tzcntq %r11, %rdx +; AVX2-NEXT: addl $64, %edx +; AVX2-NEXT: testq %r8, %r8 +; AVX2-NEXT: cmovnel %eax, %edx +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: tzcntq %rbx, %r12 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rbp, %rax +; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %rbx, %rbx +; AVX2-NEXT: cmovnel %r12d, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: movq %r8, %r12 +; AVX2-NEXT: orq %r11, %r12 +; AVX2-NEXT: cmovnel %edx, %eax +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: tzcntq %rsi, %rdx +; AVX2-NEXT: xorl %r13d, %r13d +; AVX2-NEXT: tzcntq %r10, %r13 +; AVX2-NEXT: addl $64, %r13d +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: cmovnel %edx, %r13d +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: tzcntq %rcx, %rdx +; AVX2-NEXT: xorl %r12d, %r12d +; AVX2-NEXT: tzcntq %r9, %r12 +; AVX2-NEXT: addl $64, %r12d +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: cmovnel %edx, %r12d +; AVX2-NEXT: subl $-128, %r12d +; AVX2-NEXT: movq %rsi, %rdx +; AVX2-NEXT: 
orq %r10, %rdx +; AVX2-NEXT: cmovnel %r13d, %r12d +; AVX2-NEXT: addl $256, %r12d # imm = 0x100 +; AVX2-NEXT: movq %r11, %rdx +; AVX2-NEXT: orq %rbp, %rdx +; AVX2-NEXT: movq %r8, %r13 +; AVX2-NEXT: orq %rbx, %r13 +; AVX2-NEXT: orq %rdx, %r13 +; AVX2-NEXT: cmovnel %eax, %r12d +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: tzcntq %r15, %rdx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %r14, %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: testq %r15, %r15 +; AVX2-NEXT: cmovnel %edx, %eax +; AVX2-NEXT: movq 88(%rdi), %rbp +; AVX2-NEXT: xorl %r13d, %r13d +; AVX2-NEXT: tzcntq %rbp, %r13 +; AVX2-NEXT: addl $64, %r13d +; AVX2-NEXT: movq 80(%rdi), %r10 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: tzcntq %r10, %rcx +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: cmovnel %ecx, %r13d +; AVX2-NEXT: subl $-128, %r13d +; AVX2-NEXT: movq %r15, %rcx +; AVX2-NEXT: orq %r14, %rcx +; AVX2-NEXT: cmovnel %eax, %r13d +; AVX2-NEXT: movq 104(%rdi), %r9 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: tzcntq %r9, %rcx +; AVX2-NEXT: addl $64, %ecx +; AVX2-NEXT: movq 96(%rdi), %rdx +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq %rdx, %rax +; AVX2-NEXT: testq %rdx, %rdx +; AVX2-NEXT: cmovnel %eax, %ecx +; AVX2-NEXT: movq 112(%rdi), %rsi +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: tzcntq 120(%rdi), %rax +; AVX2-NEXT: addl $64, %eax +; AVX2-NEXT: tzcntq %rsi, %rdi +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: cmovnel %edi, %eax +; AVX2-NEXT: subl $-128, %eax +; AVX2-NEXT: orq %r9, %rdx +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: orq %rbp, %r14 +; AVX2-NEXT: orq %r10, %r15 +; AVX2-NEXT: addl $256, %eax # imm = 0x100 +; AVX2-NEXT: orq %r14, %r15 +; AVX2-NEXT: cmovnel %r13d, %eax +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; AVX2-NEXT: orq %rcx, %r11 +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload +; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload +; AVX2-NEXT: orq %rbx, %r8 +; AVX2-NEXT: addl $512, %eax # imm = 0x200 +; AVX2-NEXT: orq %r11, %r8 +; AVX2-NEXT: cmovnel %r12d, %eax +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_cttz_i1024: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq 88(%rdi), %rbp +; AVX512-NEXT: movq 72(%rdi), %r15 +; AVX512-NEXT: movq 56(%rdi), %r9 +; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq 48(%rdi), %rcx +; AVX512-NEXT: movq 40(%rdi), %r10 +; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq 32(%rdi), %rsi +; AVX512-NEXT: movq 24(%rdi), %r14 +; AVX512-NEXT: movq 16(%rdi), %rbx +; AVX512-NEXT: movq (%rdi), %r8 +; AVX512-NEXT: movq 8(%rdi), %r11 +; AVX512-NEXT: tzcntq %r8, %rax +; AVX512-NEXT: tzcntq %r11, %rdx +; AVX512-NEXT: addl $64, %edx +; AVX512-NEXT: testq %r8, %r8 +; AVX512-NEXT: cmovnel %eax, %edx +; AVX512-NEXT: tzcntq %rbx, %r12 +; AVX512-NEXT: tzcntq %r14, %rax +; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %rbx, %rbx +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: subl $-128, %eax +; 
AVX512-NEXT: movq %r8, %r12 +; AVX512-NEXT: orq %r11, %r12 +; AVX512-NEXT: cmovnel %edx, %eax +; AVX512-NEXT: tzcntq %rsi, %rdx +; AVX512-NEXT: tzcntq %r10, %r13 +; AVX512-NEXT: addl $64, %r13d +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: cmovnel %edx, %r13d +; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: tzcntq %rcx, %rdx +; AVX512-NEXT: tzcntq %r9, %r12 +; AVX512-NEXT: addl $64, %r12d +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: cmovnel %edx, %r12d +; AVX512-NEXT: subl $-128, %r12d +; AVX512-NEXT: movq %rsi, %rdx +; AVX512-NEXT: orq %r10, %rdx +; AVX512-NEXT: cmovnel %r13d, %r12d +; AVX512-NEXT: addl $256, %r12d # imm = 0x100 +; AVX512-NEXT: movq %r11, %rdx +; AVX512-NEXT: orq %r14, %rdx +; AVX512-NEXT: movq %r8, %r13 +; AVX512-NEXT: orq %rbx, %r13 +; AVX512-NEXT: orq %rdx, %r13 +; AVX512-NEXT: movq 64(%rdi), %r13 +; AVX512-NEXT: cmovnel %eax, %r12d +; AVX512-NEXT: tzcntq %r13, %rdx +; AVX512-NEXT: tzcntq %r15, %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: testq %r13, %r13 +; AVX512-NEXT: cmovnel %edx, %eax +; AVX512-NEXT: movq %rbp, %r14 +; AVX512-NEXT: tzcntq %rbp, %rbp +; AVX512-NEXT: addl $64, %ebp +; AVX512-NEXT: movq 80(%rdi), %r10 +; AVX512-NEXT: tzcntq %r10, %rcx +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: cmovnel %ecx, %ebp +; AVX512-NEXT: subl $-128, %ebp +; AVX512-NEXT: movq %r13, %rcx +; AVX512-NEXT: orq %r15, %rcx +; AVX512-NEXT: cmovnel %eax, %ebp +; AVX512-NEXT: movq 104(%rdi), %r9 +; AVX512-NEXT: tzcntq %r9, %rcx +; AVX512-NEXT: addl $64, %ecx +; AVX512-NEXT: movq 96(%rdi), %rdx +; AVX512-NEXT: tzcntq %rdx, %rax +; AVX512-NEXT: testq %rdx, %rdx +; AVX512-NEXT: cmovnel %eax, %ecx +; AVX512-NEXT: movq 112(%rdi), %rsi +; AVX512-NEXT: tzcntq 120(%rdi), %rax +; AVX512-NEXT: addl $64, %eax +; AVX512-NEXT: tzcntq %rsi, %rdi +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: cmovnel %edi, %eax +; AVX512-NEXT: subl $-128, %eax +; AVX512-NEXT: orq %r9, %rdx +; AVX512-NEXT: cmovnel %ecx, %eax +; AVX512-NEXT: orq %r14, %r15 +; AVX512-NEXT: orq %r10, %r13 +; AVX512-NEXT: addl $256, %eax # imm = 0x100 +; AVX512-NEXT: orq %r15, %r13 +; AVX512-NEXT: cmovnel %ebp, %eax +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload +; AVX512-NEXT: orq %rcx, %r11 +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload +; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload +; AVX512-NEXT: orq %rbx, %r8 +; AVX512-NEXT: addl $512, %eax # imm = 0x200 +; AVX512-NEXT: orq %r11, %r8 +; AVX512-NEXT: cmovnel %r12d, %eax +; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %a0 = load i1024, ptr %p0 + %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0) + %res = trunc i1024 %cnt to i32 + ret i32 %res +} + +; +; CTTZ_ZERO_UNDEF +; + +define i32 @test_cttz_undef_i128(i128 %a0) nounwind { +; SSE-LABEL: test_cttz_undef_i128: ; SSE: # %bb.0: ; SSE-NEXT: rep bsfq %rdi, %rcx -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq %rsi, %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %rdi, %rdi @@ -1822,7 +4325,7 @@ define i32 @test_cttz_i128(i128 %a0) nounwind { ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: retq ; 
-; AVX2-LABEL: test_cttz_i128: +; AVX2-LABEL: test_cttz_undef_i128: ; AVX2: # %bb.0: ; AVX2-NEXT: tzcntq %rdi, %rcx ; AVX2-NEXT: tzcntq %rsi, %rax @@ -1832,7 +4335,7 @@ define i32 @test_cttz_i128(i128 %a0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_i128: +; AVX512-LABEL: test_cttz_undef_i128: ; AVX512: # %bb.0: ; AVX512-NEXT: tzcntq %rdi, %rcx ; AVX512-NEXT: tzcntq %rsi, %rax @@ -1841,17 +4344,16 @@ define i32 @test_cttz_i128(i128 %a0) nounwind { ; AVX512-NEXT: cmovnel %ecx, %eax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax ; AVX512-NEXT: retq - %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0) + %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 -1) %res = trunc i128 %cnt to i32 ret i32 %res } -define i32 @load_cttz_i128(ptr %p0) nounwind { -; SSE-LABEL: load_cttz_i128: +define i32 @load_cttz_undef_i128(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_undef_i128: ; SSE: # %bb.0: ; SSE-NEXT: movq (%rdi), %rcx ; SSE-NEXT: rep bsfq %rcx, %rdx -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq 8(%rdi), %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %rcx, %rcx @@ -1859,7 +4361,7 @@ define i32 @load_cttz_i128(ptr %p0) nounwind { ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: retq ; -; AVX2-LABEL: load_cttz_i128: +; AVX2-LABEL: load_cttz_undef_i128: ; AVX2: # %bb.0: ; AVX2-NEXT: movq (%rdi), %rcx ; AVX2-NEXT: tzcntq %rcx, %rdx @@ -1870,7 +4372,7 @@ define i32 @load_cttz_i128(ptr %p0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i128: +; AVX512-LABEL: load_cttz_undef_i128: ; AVX512: # %bb.0: ; AVX512-NEXT: movq (%rdi), %rcx ; AVX512-NEXT: tzcntq %rcx, %rdx @@ -1881,13 +4383,13 @@ define i32 @load_cttz_i128(ptr %p0) nounwind { ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax ; AVX512-NEXT: retq %a0 = load i128, ptr %p0 - %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0) + %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 -1) %res = trunc i128 %cnt to i32 ret i32 %res } -define i32 @test_cttz_i256(i256 %a0) nounwind { -; SSE-LABEL: test_cttz_i256: +define i32 @test_cttz_undef_i256(i256 %a0) nounwind { +; SSE-LABEL: test_cttz_undef_i256: ; SSE: # %bb.0: ; SSE-NEXT: rep bsfq %rdi, %rax ; SSE-NEXT: rep bsfq %rsi, %r8 @@ -1895,7 +4397,6 @@ define i32 @test_cttz_i256(i256 %a0) nounwind { ; SSE-NEXT: testq %rdi, %rdi ; SSE-NEXT: cmovnel %eax, %r8d ; SSE-NEXT: rep bsfq %rdx, %r9 -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq %rcx, %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %rdx, %rdx @@ -1906,7 +4407,7 @@ define i32 @test_cttz_i256(i256 %a0) nounwind { ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: retq ; -; AVX2-LABEL: test_cttz_i256: +; AVX2-LABEL: test_cttz_undef_i256: ; AVX2: # %bb.0: ; AVX2-NEXT: tzcntq %rdi, %rax ; AVX2-NEXT: tzcntq %rsi, %r8 @@ -1925,7 +4426,7 @@ define i32 @test_cttz_i256(i256 %a0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_i256: +; AVX512-LABEL: test_cttz_undef_i256: ; AVX512: # %bb.0: ; AVX512-NEXT: tzcntq %rdi, %rax ; AVX512-NEXT: tzcntq %rsi, %r8 @@ -1942,13 +4443,13 @@ define i32 @test_cttz_i256(i256 %a0) nounwind { ; AVX512-NEXT: cmovnel %r8d, %eax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax ; AVX512-NEXT: retq - %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0) + %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 -1) %res = trunc i256 %cnt to i32 ret i32 %res } -define i32 @load_cttz_i256(ptr %p0) nounwind { -; 
SSE-LABEL: load_cttz_i256: +define i32 @load_cttz_undef_i256(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_undef_i256: ; SSE: # %bb.0: ; SSE-NEXT: movq 16(%rdi), %rcx ; SSE-NEXT: movq (%rdi), %rdx @@ -1959,7 +4460,6 @@ define i32 @load_cttz_i256(ptr %p0) nounwind { ; SSE-NEXT: testq %rdx, %rdx ; SSE-NEXT: cmovnel %eax, %r8d ; SSE-NEXT: rep bsfq %rcx, %r9 -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq 24(%rdi), %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %rcx, %rcx @@ -1970,7 +4470,7 @@ define i32 @load_cttz_i256(ptr %p0) nounwind { ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: retq ; -; AVX2-LABEL: load_cttz_i256: +; AVX2-LABEL: load_cttz_undef_i256: ; AVX2: # %bb.0: ; AVX2-NEXT: movq (%rdi), %rcx ; AVX2-NEXT: movq 8(%rdi), %rdx @@ -1992,7 +4492,7 @@ define i32 @load_cttz_i256(ptr %p0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i256: +; AVX512-LABEL: load_cttz_undef_i256: ; AVX512: # %bb.0: ; AVX512-NEXT: movq 16(%rdi), %rcx ; AVX512-NEXT: movq (%rdi), %rdx @@ -2013,13 +4513,13 @@ define i32 @load_cttz_i256(ptr %p0) nounwind { ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax ; AVX512-NEXT: retq %a0 = load i256, ptr %p0 - %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0) + %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 -1) %res = trunc i256 %cnt to i32 ret i32 %res } -define i32 @test_cttz_i512(i512 %a0) nounwind { -; SSE-LABEL: test_cttz_i512: +define i32 @test_cttz_undef_i512(i512 %a0) nounwind { +; SSE-LABEL: test_cttz_undef_i512: ; SSE: # %bb.0: ; SSE-NEXT: pushq %r14 ; SSE-NEXT: pushq %rbx @@ -2033,7 +4533,6 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; SSE-NEXT: addl $64, %r10d ; SSE-NEXT: testq %rdx, %rdx ; SSE-NEXT: cmovnel %eax, %r10d -; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx ; SSE-NEXT: subl $-128, %r10d ; SSE-NEXT: movq %rdi, %rax ; SSE-NEXT: orq %rsi, %rax @@ -2043,8 +4542,8 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; SSE-NEXT: addl $64, %r11d ; SSE-NEXT: testq %r8, %r8 ; SSE-NEXT: cmovnel %eax, %r11d +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx ; SSE-NEXT: rep bsfq %rbx, %r14 -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %rbx, %rbx @@ -2062,7 +4561,7 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; SSE-NEXT: popq %r14 ; SSE-NEXT: retq ; -; AVX2-LABEL: test_cttz_i512: +; AVX2-LABEL: test_cttz_undef_i512: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %r14 ; AVX2-NEXT: pushq %rbx @@ -2109,7 +4608,7 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; AVX2-NEXT: popq %r14 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_i512: +; AVX512-LABEL: test_cttz_undef_i512: ; AVX512: # %bb.0: ; AVX512-NEXT: pushq %r14 ; AVX512-NEXT: pushq %rbx @@ -2150,64 +4649,63 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; AVX512-NEXT: popq %rbx ; AVX512-NEXT: popq %r14 ; AVX512-NEXT: retq - %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) + %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 -1) %res = trunc i512 %cnt to i32 ret i32 %res } -define i32 @load_cttz_i512(ptr %p0) nounwind { -; SSE-LABEL: load_cttz_i512: +define i32 @load_cttz_undef_i512(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_undef_i512: ; SSE: # %bb.0: ; SSE-NEXT: pushq %r15 ; SSE-NEXT: pushq %r14 ; SSE-NEXT: pushq %rbx -; SSE-NEXT: movq 48(%rdi), %r10 ; SSE-NEXT: movq 40(%rdi), %r9 ; SSE-NEXT: movq 24(%rdi), %r8 ; SSE-NEXT: movq 16(%rdi), %rdx ; SSE-NEXT: movq (%rdi), %rcx ; SSE-NEXT: movq 8(%rdi), %rsi ; SSE-NEXT: rep bsfq %rcx, %rax -; SSE-NEXT: rep bsfq %rsi, 
%rbx -; SSE-NEXT: addl $64, %ebx +; SSE-NEXT: rep bsfq %rsi, %r11 +; SSE-NEXT: addl $64, %r11d ; SSE-NEXT: testq %rcx, %rcx -; SSE-NEXT: cmovnel %eax, %ebx +; SSE-NEXT: cmovnel %eax, %r11d ; SSE-NEXT: rep bsfq %rdx, %rax -; SSE-NEXT: rep bsfq %r8, %r11 -; SSE-NEXT: addl $64, %r11d +; SSE-NEXT: rep bsfq %r8, %r10 +; SSE-NEXT: addl $64, %r10d ; SSE-NEXT: testq %rdx, %rdx -; SSE-NEXT: cmovnel %eax, %r11d -; SSE-NEXT: movq 32(%rdi), %r14 -; SSE-NEXT: subl $-128, %r11d +; SSE-NEXT: cmovnel %eax, %r10d +; SSE-NEXT: movq 32(%rdi), %rbx +; SSE-NEXT: subl $-128, %r10d ; SSE-NEXT: movq %rcx, %rax ; SSE-NEXT: orq %rsi, %rax -; SSE-NEXT: cmovnel %ebx, %r11d -; SSE-NEXT: rep bsfq %r14, %rax -; SSE-NEXT: rep bsfq %r9, %rbx -; SSE-NEXT: addl $64, %ebx -; SSE-NEXT: testq %r14, %r14 -; SSE-NEXT: cmovnel %eax, %ebx -; SSE-NEXT: rep bsfq %r10, %r15 -; SSE-NEXT: movl $64, %eax +; SSE-NEXT: cmovnel %r11d, %r10d +; SSE-NEXT: rep bsfq %rbx, %rax +; SSE-NEXT: rep bsfq %r9, %r11 +; SSE-NEXT: addl $64, %r11d +; SSE-NEXT: testq %rbx, %rbx +; SSE-NEXT: cmovnel %eax, %r11d +; SSE-NEXT: movq 48(%rdi), %r14 +; SSE-NEXT: rep bsfq %r14, %r15 ; SSE-NEXT: rep bsfq 56(%rdi), %rax ; SSE-NEXT: addl $64, %eax -; SSE-NEXT: testq %r10, %r10 +; SSE-NEXT: testq %r14, %r14 ; SSE-NEXT: cmovnel %r15d, %eax ; SSE-NEXT: subl $-128, %eax -; SSE-NEXT: orq %r9, %r14 -; SSE-NEXT: cmovnel %ebx, %eax +; SSE-NEXT: orq %r9, %rbx +; SSE-NEXT: cmovnel %r11d, %eax ; SSE-NEXT: addl $256, %eax # imm = 0x100 ; SSE-NEXT: orq %r8, %rsi ; SSE-NEXT: orq %rdx, %rcx ; SSE-NEXT: orq %rsi, %rcx -; SSE-NEXT: cmovnel %r11d, %eax +; SSE-NEXT: cmovnel %r10d, %eax ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: popq %rbx ; SSE-NEXT: popq %r14 ; SSE-NEXT: popq %r15 ; SSE-NEXT: retq ; -; AVX2-LABEL: load_cttz_i512: +; AVX2-LABEL: load_cttz_undef_i512: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %r15 ; AVX2-NEXT: pushq %r14 @@ -2263,7 +4761,7 @@ define i32 @load_cttz_i512(ptr %p0) nounwind { ; AVX2-NEXT: popq %r15 ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i512: +; AVX512-LABEL: load_cttz_undef_i512: ; AVX512: # %bb.0: ; AVX512-NEXT: pushq %r14 ; AVX512-NEXT: pushq %rbx @@ -2311,13 +4809,13 @@ define i32 @load_cttz_i512(ptr %p0) nounwind { ; AVX512-NEXT: popq %r14 ; AVX512-NEXT: retq %a0 = load i512, ptr %p0 - %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) + %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 -1) %res = trunc i512 %cnt to i32 ret i32 %res } -define i32 @test_cttz_i1024(i1024 %a0) nounwind { -; SSE-LABEL: test_cttz_i1024: +define i32 @test_cttz_undef_i1024(i1024 %a0) nounwind { +; SSE-LABEL: test_cttz_undef_i1024: ; SSE: # %bb.0: ; SSE-NEXT: pushq %rbp ; SSE-NEXT: pushq %r15 @@ -2325,74 +4823,72 @@ define i32 @test_cttz_i1024(i1024 %a0) nounwind { ; SSE-NEXT: pushq %r13 ; SSE-NEXT: pushq %r12 ; SSE-NEXT: pushq %rbx -; SSE-NEXT: movq %r9, %r13 -; SSE-NEXT: movq %r8, %r14 +; SSE-NEXT: movq %r9, %r14 ; SSE-NEXT: movq %rcx, %rbx ; SSE-NEXT: movq %rdx, %r10 ; SSE-NEXT: movq %rsi, %r9 -; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi -; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11 +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rcx ; SSE-NEXT: rep bsfq %rdi, %rax -; SSE-NEXT: rep bsfq %r9, %r15 -; SSE-NEXT: addl $64, %r15d +; SSE-NEXT: rep bsfq %rsi, %r12 +; SSE-NEXT: addl $64, %r12d ; SSE-NEXT: testq %rdi, %rdi -; SSE-NEXT: cmovnel %eax, %r15d -; SSE-NEXT: rep bsfq %r10, %r12 -; SSE-NEXT: rep bsfq %rcx, %rax +; SSE-NEXT: cmovnel %eax, %r12d +; SSE-NEXT: rep bsfq %r10, %r15 +; SSE-NEXT: rep 
bsfq %rbx, %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %r10, %r10 -; SSE-NEXT: cmovnel %r12d, %eax -; SSE-NEXT: subl $-128, %eax -; SSE-NEXT: movq %rdi, %r12 -; SSE-NEXT: orq %r9, %r12 ; SSE-NEXT: cmovnel %r15d, %eax -; SSE-NEXT: rep bsfq %r8, %r15 -; SSE-NEXT: movq %r13, %rcx -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: rep bsfq %r13, %r13 +; SSE-NEXT: subl $-128, %eax +; SSE-NEXT: movq %rdi, %r13 +; SSE-NEXT: orq %rsi, %r13 +; SSE-NEXT: cmovnel %r12d, %eax +; SSE-NEXT: movq %r8, %r15 +; SSE-NEXT: rep bsfq %r8, %r12 +; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: rep bsfq %r14, %r13 ; SSE-NEXT: addl $64, %r13d ; SSE-NEXT: testq %r8, %r8 -; SSE-NEXT: cmovnel %r15d, %r13d -; SSE-NEXT: rep bsfq %rdx, %r12 -; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r15 -; SSE-NEXT: addl $64, %r15d -; SSE-NEXT: testq %rdx, %rdx -; SSE-NEXT: cmovnel %r12d, %r15d -; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r12 -; SSE-NEXT: subl $-128, %r15d +; SSE-NEXT: cmovnel %r12d, %r13d +; SSE-NEXT: rep bsfq %rcx, %rbp +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r12 +; SSE-NEXT: addl $64, %r12d +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %ebp, %r12d +; SSE-NEXT: subl $-128, %r12d ; SSE-NEXT: movq %r8, %rbp -; SSE-NEXT: orq %rcx, %rbp -; SSE-NEXT: cmovnel %r13d, %r15d -; SSE-NEXT: addl $256, %r15d # imm = 0x100 -; SSE-NEXT: movq %r9, %r13 +; SSE-NEXT: orq %r14, %rbp +; SSE-NEXT: cmovnel %r13d, %r12d +; SSE-NEXT: addl $256, %r12d # imm = 0x100 +; SSE-NEXT: movq %rsi, %r13 ; SSE-NEXT: orq %rbx, %r13 ; SSE-NEXT: movq %rdi, %rbp ; SSE-NEXT: orq %r10, %rbp ; SSE-NEXT: orq %r13, %rbp -; SSE-NEXT: cmovnel %eax, %r15d -; SSE-NEXT: rep bsfq %r11, %r13 -; SSE-NEXT: rep bsfq %r12, %rax +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r13 +; SSE-NEXT: cmovnel %eax, %r12d +; SSE-NEXT: rep bsfq %r11, %rbp +; SSE-NEXT: rep bsfq %r13, %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: testq %r11, %r11 -; SSE-NEXT: cmovnel %r13d, %eax -; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r13 -; SSE-NEXT: addl $64, %r13d -; SSE-NEXT: rep bsfq %rsi, %rcx -; SSE-NEXT: testq %rsi, %rsi -; SSE-NEXT: cmovnel %ecx, %r13d -; SSE-NEXT: subl $-128, %r13d +; SSE-NEXT: cmovnel %ebp, %eax +; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rbp +; SSE-NEXT: addl $64, %ebp +; SSE-NEXT: rep bsfq %rdx, %rcx +; SSE-NEXT: testq %rdx, %rdx +; SSE-NEXT: cmovnel %ecx, %ebp +; SSE-NEXT: subl $-128, %ebp ; SSE-NEXT: movq %r11, %rcx -; SSE-NEXT: orq %r12, %rcx -; SSE-NEXT: cmovnel %eax, %r13d -; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbp -; SSE-NEXT: rep bsfq %rbp, %rcx +; SSE-NEXT: orq %r13, %rcx +; SSE-NEXT: cmovnel %eax, %ebp +; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; SSE-NEXT: rep bsfq %r14, %rcx ; SSE-NEXT: addl $64, %ecx ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx ; SSE-NEXT: rep bsfq %rdx, %rax ; SSE-NEXT: testq %rdx, %rdx ; SSE-NEXT: cmovnel %eax, %ecx -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax ; SSE-NEXT: addl $64, %eax ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8 @@ -2400,22 +4896,22 @@ define i32 @test_cttz_i1024(i1024 %a0) nounwind { ; SSE-NEXT: testq %r8, %r8 ; SSE-NEXT: cmovnel %esi, %eax ; SSE-NEXT: subl $-128, %eax -; SSE-NEXT: orq %rbp, %rdx +; SSE-NEXT: orq %r14, %rdx ; SSE-NEXT: cmovnel %ecx, %eax -; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r12 +; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r13 ; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r11 ; SSE-NEXT: addl $256, %eax # imm = 0x100 -; SSE-NEXT: orq %r12, %r11 -; SSE-NEXT: cmovnel %r13d, %eax +; SSE-NEXT: orq %r13, %r11 +; SSE-NEXT: cmovnel %ebp, %eax ; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx ; 
SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload ; SSE-NEXT: orq %rbx, %r9 ; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10 -; SSE-NEXT: orq %r14, %rdi +; SSE-NEXT: orq %r15, %rdi ; SSE-NEXT: orq %r10, %rdi ; SSE-NEXT: addl $512, %eax # imm = 0x200 ; SSE-NEXT: orq %r9, %rdi -; SSE-NEXT: cmovnel %r15d, %eax +; SSE-NEXT: cmovnel %r12d, %eax ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: popq %rbx ; SSE-NEXT: popq %r12 @@ -2425,7 +4921,7 @@ define i32 @test_cttz_i1024(i1024 %a0) nounwind { ; SSE-NEXT: popq %rbp ; SSE-NEXT: retq ; -; AVX2-LABEL: test_cttz_i1024: +; AVX2-LABEL: test_cttz_undef_i1024: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp ; AVX2-NEXT: pushq %r15 @@ -2547,7 +5043,7 @@ define i32 @test_cttz_i1024(i1024 %a0) nounwind { ; AVX2-NEXT: popq %rbp ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_i1024: +; AVX512-LABEL: test_cttz_undef_i1024: ; AVX512: # %bb.0: ; AVX512-NEXT: pushq %rbp ; AVX512-NEXT: pushq %r15 @@ -2652,13 +5148,13 @@ define i32 @test_cttz_i1024(i1024 %a0) nounwind { ; AVX512-NEXT: popq %r15 ; AVX512-NEXT: popq %rbp ; AVX512-NEXT: retq - %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0) + %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 -1) %res = trunc i1024 %cnt to i32 ret i32 %res } -define i32 @load_cttz_i1024(ptr %p0) nounwind { -; SSE-LABEL: load_cttz_i1024: +define i32 @load_cttz_undef_i1024(ptr %p0) nounwind { +; SSE-LABEL: load_cttz_undef_i1024: ; SSE: # %bb.0: ; SSE-NEXT: pushq %rbp ; SSE-NEXT: pushq %r15 @@ -2666,14 +5162,14 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; SSE-NEXT: pushq %r13 ; SSE-NEXT: pushq %r12 ; SSE-NEXT: pushq %rbx -; SSE-NEXT: movq 88(%rdi), %r10 +; SSE-NEXT: movq 72(%rdi), %rbx +; SSE-NEXT: movq 56(%rdi), %r9 +; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 48(%rdi), %rcx +; SSE-NEXT: movq 40(%rdi), %r10 ; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 56(%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 40(%rdi), %rsi -; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 24(%rdi), %r9 -; SSE-NEXT: movq 16(%rdi), %r15 +; SSE-NEXT: movq 32(%rdi), %rsi +; SSE-NEXT: movq 24(%rdi), %rbp ; SSE-NEXT: movq (%rdi), %r8 ; SSE-NEXT: movq 8(%rdi), %r11 ; SSE-NEXT: rep bsfq %r8, %rax @@ -2681,57 +5177,57 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; SSE-NEXT: addl $64, %edx ; SSE-NEXT: testq %r8, %r8 ; SSE-NEXT: cmovnel %eax, %edx -; SSE-NEXT: rep bsfq %r15, %rbx -; SSE-NEXT: rep bsfq %r9, %rax -; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 16(%rdi), %r14 +; SSE-NEXT: rep bsfq %r14, %r15 +; SSE-NEXT: rep bsfq %rbp, %rax +; SSE-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE-NEXT: addl $64, %eax -; SSE-NEXT: testq %r15, %r15 -; SSE-NEXT: cmovnel %ebx, %eax -; SSE-NEXT: movq 32(%rdi), %rbx +; SSE-NEXT: testq %r14, %r14 +; SSE-NEXT: cmovnel %r15d, %eax ; SSE-NEXT: subl $-128, %eax -; SSE-NEXT: movq %r8, %r14 -; SSE-NEXT: orq %r11, %r14 +; SSE-NEXT: movq %r8, %r15 +; SSE-NEXT: orq %r11, %r15 ; SSE-NEXT: cmovnel %edx, %eax -; SSE-NEXT: rep bsfq %rbx, %rdx -; SSE-NEXT: rep bsfq %rsi, %r12 -; SSE-NEXT: addl $64, %r12d -; SSE-NEXT: testq %rbx, %rbx -; SSE-NEXT: cmovnel %edx, %r12d -; SSE-NEXT: movq 48(%rdi), %r13 -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: rep bsfq %r13, %rdx -; SSE-NEXT: rep bsfq %rcx, %r14 -; SSE-NEXT: addl $64, %r14d -; SSE-NEXT: testq %r13, %r13 -; SSE-NEXT: cmovnel %edx, %r14d -; 
SSE-NEXT: subl $-128, %r14d -; SSE-NEXT: movq %rbx, %rdx -; SSE-NEXT: orq %rsi, %rdx -; SSE-NEXT: cmovnel %r12d, %r14d -; SSE-NEXT: movq 72(%rdi), %r12 -; SSE-NEXT: addl $256, %r14d # imm = 0x100 +; SSE-NEXT: rep bsfq %rsi, %rdx +; SSE-NEXT: rep bsfq %r10, %r13 +; SSE-NEXT: addl $64, %r13d +; SSE-NEXT: testq %rsi, %rsi +; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: cmovnel %edx, %r13d +; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: rep bsfq %rcx, %rdx +; SSE-NEXT: rep bsfq %r9, %r15 +; SSE-NEXT: addl $64, %r15d +; SSE-NEXT: testq %rcx, %rcx +; SSE-NEXT: cmovnel %edx, %r15d +; SSE-NEXT: movq 64(%rdi), %r12 +; SSE-NEXT: subl $-128, %r15d +; SSE-NEXT: movq %rsi, %rdx +; SSE-NEXT: orq %r10, %rdx +; SSE-NEXT: cmovnel %r13d, %r15d +; SSE-NEXT: addl $256, %r15d # imm = 0x100 ; SSE-NEXT: movq %r11, %rdx -; SSE-NEXT: orq %r9, %rdx +; SSE-NEXT: orq %rbp, %rdx ; SSE-NEXT: movq %r8, %r13 -; SSE-NEXT: orq %r15, %r13 +; SSE-NEXT: orq %r14, %r13 ; SSE-NEXT: orq %rdx, %r13 -; SSE-NEXT: movq 64(%rdi), %r13 -; SSE-NEXT: cmovnel %eax, %r14d -; SSE-NEXT: rep bsfq %r13, %rdx -; SSE-NEXT: rep bsfq %r12, %rax +; SSE-NEXT: cmovnel %eax, %r15d +; SSE-NEXT: rep bsfq %r12, %rdx +; SSE-NEXT: rep bsfq %rbx, %rax ; SSE-NEXT: addl $64, %eax -; SSE-NEXT: testq %r13, %r13 +; SSE-NEXT: testq %r12, %r12 ; SSE-NEXT: cmovnel %edx, %eax -; SSE-NEXT: rep bsfq %r10, %rbp -; SSE-NEXT: addl $64, %ebp +; SSE-NEXT: movq 88(%rdi), %rbp +; SSE-NEXT: rep bsfq %rbp, %r13 +; SSE-NEXT: addl $64, %r13d ; SSE-NEXT: movq 80(%rdi), %r10 ; SSE-NEXT: rep bsfq %r10, %rcx ; SSE-NEXT: testq %r10, %r10 -; SSE-NEXT: cmovnel %ecx, %ebp -; SSE-NEXT: subl $-128, %ebp -; SSE-NEXT: movq %r13, %rcx -; SSE-NEXT: orq %r12, %rcx -; SSE-NEXT: cmovnel %eax, %ebp +; SSE-NEXT: cmovnel %ecx, %r13d +; SSE-NEXT: subl $-128, %r13d +; SSE-NEXT: movq %r12, %rcx +; SSE-NEXT: orq %rbx, %rcx +; SSE-NEXT: cmovnel %eax, %r13d ; SSE-NEXT: movq 104(%rdi), %r9 ; SSE-NEXT: rep bsfq %r9, %rcx ; SSE-NEXT: addl $64, %ecx @@ -2739,7 +5235,6 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; SSE-NEXT: rep bsfq %rdx, %rax ; SSE-NEXT: testq %rdx, %rdx ; SSE-NEXT: cmovnel %eax, %ecx -; SSE-NEXT: movl $64, %eax ; SSE-NEXT: rep bsfq 120(%rdi), %rax ; SSE-NEXT: movq 112(%rdi), %rdi ; SSE-NEXT: addl $64, %eax @@ -2749,21 +5244,21 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; SSE-NEXT: subl $-128, %eax ; SSE-NEXT: orq %r9, %rdx ; SSE-NEXT: cmovnel %ecx, %eax -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload -; SSE-NEXT: orq %r10, %r13 +; SSE-NEXT: orq %rbp, %rbx +; SSE-NEXT: orq %r10, %r12 ; SSE-NEXT: addl $256, %eax # imm = 0x100 -; SSE-NEXT: orq %r12, %r13 -; SSE-NEXT: cmovnel %ebp, %eax +; SSE-NEXT: orq %rbx, %r12 +; SSE-NEXT: cmovnel %r13d, %eax ; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload ; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload ; SSE-NEXT: orq %rcx, %r11 -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload -; SSE-NEXT: orq %rbx, %r8 -; SSE-NEXT: orq %r15, %r8 +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload +; SSE-NEXT: orq %r14, %r8 ; SSE-NEXT: addl $512, %eax # imm = 0x200 ; SSE-NEXT: orq %r11, %r8 -; SSE-NEXT: cmovnel %r14d, %eax +; SSE-NEXT: cmovnel %r15d, %eax ; SSE-NEXT: # kill: def $eax killed $eax killed $rax ; SSE-NEXT: popq %rbx ; SSE-NEXT: popq %r12 @@ -2773,7 
+5268,7 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; SSE-NEXT: popq %rbp ; SSE-NEXT: retq ; -; AVX2-LABEL: load_cttz_i1024: +; AVX2-LABEL: load_cttz_undef_i1024: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp ; AVX2-NEXT: pushq %r15 @@ -2900,7 +5395,7 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; AVX2-NEXT: popq %rbp ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i1024: +; AVX512-LABEL: load_cttz_undef_i1024: ; AVX512: # %bb.0: ; AVX512-NEXT: pushq %rbp ; AVX512-NEXT: pushq %r15 @@ -3015,7 +5510,7 @@ define i32 @load_cttz_i1024(ptr %p0) nounwind { ; AVX512-NEXT: popq %rbp ; AVX512-NEXT: retq %a0 = load i1024, ptr %p0 - %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0) + %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 -1) %res = trunc i1024 %cnt to i32 ret i32 %res }
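Note on the zero_undef coverage above: the only IR change between each pair of tests is the is_zero_poison flag on the intrinsic (`i1 0` vs `i1 -1`). With `i1 -1` an all-zero input is poison, so the lowering drops the `movl $64, %eax` seed that the `i1 0` variants use to give the topmost word a defined count of 64 when it is zero (so that an all-zero input still yields the full bit width); the cmov cascades are otherwise unchanged.

For readers skimming the generated checks, here is a minimal C sketch of the select cascade the i128 tests encode (the helper name and the C formulation are illustrative only, not part of the patch):

    #include <stdint.h>

    /* Mirrors the i128 zero_undef lowering: count trailing zeros of the
       low word; if the low word is zero, fall back to 64 + cttz(high).
       The zero_undef contract guarantees (lo | hi) != 0, so each
       __builtin_ctzll call below only ever sees a non-zero operand. */
    static unsigned cttz_undef_u128(uint64_t lo, uint64_t hi) {
        if (lo)                                      /* testq %rdi; cmovnel */
            return (unsigned)__builtin_ctzll(lo);    /* tzcntq %rdi, %rcx   */
        return (unsigned)__builtin_ctzll(hi) + 64;   /* tzcntq %rsi, %rax; addl $64, %eax */
    }

The i256/i512/i1024 variants chain the same pattern, OR-reducing the lower words to choose between the 128-bit sub-results; `subl $-128, %eax` is simply `addl $128, %eax` with a shorter sign-extended imm8 encoding, and the `addl $256`/`addl $512` steps splice the per-block counts into the full-width result.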