diff --git a/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll
new file mode 100644
index 0000000000000..321615b90a3f3
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll
@@ -0,0 +1,3847 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp | FileCheck %s
+
+; i32 saturate
+
+define arm_aapcs_vfpcc <2 x i32> @stest_f64i32(<2 x double> %x) {
+; CHECK-LABEL: stest_f64i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    adr r3, .LCPI0_0
+; CHECK-NEXT:    mvn r12, #-2147483648
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    subs.w r3, r4, r12
+; CHECK-NEXT:    sbcs r3, r5, #0
+; CHECK-NEXT:    vmov q1[2], q1[0], r0, r4
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vmov q1[3], q1[1], r1, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    subs.w r0, r0, r12
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    mov.w r12, #-1
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    adr r4, .LCPI0_1
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r3
+; CHECK-NEXT:    vmov q2[3], q2[1], r0, r3
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vbic q0, q0, q2
+; CHECK-NEXT:    vorr q0, q1, q0
+; CHECK-NEXT:    vldrw.u32 q1, [r4]
+; CHECK-NEXT:    vmov r0, r1, d1
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    rsbs.w r0, r0, #-2147483648
+; CHECK-NEXT:    sbcs.w r0, r12, r1
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    rsbs.w r1, r3, #-2147483648
+; CHECK-NEXT:    sbcs.w r1, r12, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:  .LCPI0_1:
+; CHECK-NEXT:    .long 2147483648 @ 0x80000000
+; CHECK-NEXT:    .long 4294967295 @ 0xffffffff
+; CHECK-NEXT:    .long 2147483648 @ 0x80000000
+; CHECK-NEXT:    .long 4294967295 @ 0xffffffff
+entry:
+  %conv = fptosi <2 x double> %x to <2 x i64>
+  %0 = icmp slt <2 x i64> %conv, <i64 2147483647, i64 2147483647>
+  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>
+  %1 = icmp sgt <2 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648>
+  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>
+  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
+  ret <2 x i32> %conv6
+}
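+
+; A note on the naming used throughout this file: stest_* clamps a
+; signed fptosi result to [SMIN, SMAX] of the destination type,
+; utest_* clamps an fptoui result to UMAX, and ustest_* clamps a
+; signed fptosi result to [0, UMAX]. Each clamp is written in the
+; plain icmp+select form of min/max, which the backend may fold into
+; saturating instructions (e.g. vqmovnb) where MVE allows.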
+
+define arm_aapcs_vfpcc <2 x i32> @utest_f64i32(<2 x double> %x) {
+; CHECK-LABEL: utest_f64i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    subs.w r3, r4, #-1
+; CHECK-NEXT:    vmov q1[2], q1[0], r0, r4
+; CHECK-NEXT:    sbcs r3, r5, #0
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    subs.w r0, r0, #-1
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vmov q1[3], q1[1], r1, r5
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r3
+; CHECK-NEXT:    vmov q2[3], q2[1], r0, r3
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vbic q0, q0, q2
+; CHECK-NEXT:    vorr q0, q1, q0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %conv = fptoui <2 x double> %x to <2 x i64>
+  %0 = icmp ult <2 x i64> %conv, <i64 4294967295, i64 4294967295>
+  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
+  %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32>
+  ret <2 x i32> %conv6
+}
+
+define arm_aapcs_vfpcc <2 x i32> @ustest_f64i32(<2 x double> %x) {
+; CHECK-LABEL: ustest_f64i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    subs.w r3, r4, #-1
+; CHECK-NEXT:    vmov q1[2], q1[0], r0, r4
+; CHECK-NEXT:    sbcs r3, r5, #0
+; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vmov q1[3], q1[1], r1, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    subs.w r0, r0, #-1
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r3
+; CHECK-NEXT:    vmov q2[3], q2[1], r0, r3
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vbic q0, q0, q2
+; CHECK-NEXT:    vorr q0, q1, q0
+; CHECK-NEXT:    vmov r0, r1, d1
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    sbcs.w r0, r2, r1
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    rsbs r1, r3, #0
+; CHECK-NEXT:    sbcs.w r1, r2, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
+; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %conv = fptosi <2 x double> %x to <2 x i64>
+  %0 = icmp slt <2 x i64> %conv, <i64 4294967295, i64 4294967295>
+  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
+  %1 = icmp sgt <2 x i64> %spec.store.select, zeroinitializer
+  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> zeroinitializer
+  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
+  ret <2 x i32> %conv6
+}
+
+define arm_aapcs_vfpcc <4 x i32> @stest_f32i32(<4 x float> %x) {
+; CHECK-LABEL: stest_f32i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, 
r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #8 +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r6, d8 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: mov r9, r0 +; CHECK-NEXT: mvn r0, #-2147483648 +; CHECK-NEXT: subs.w r0, r9, r0 +; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mvn r4, #-2147483648 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: mov r0, r6 +; CHECK-NEXT: csetm r11, ne +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov r10, r1 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vmov r4, r0, d9 +; CHECK-NEXT: csetm r8, ne +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: adr r2, .LCPI3_0 +; CHECK-NEXT: mvn r4, #-2147483648 +; CHECK-NEXT: vldrw.u32 q0, [r2] +; CHECK-NEXT: adr r2, .LCPI3_1 +; CHECK-NEXT: vldrw.u32 q2, [r2] +; CHECK-NEXT: subs r2, r5, r4 +; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: vmov q1[2], q1[0], r9, r6 +; CHECK-NEXT: vmov q4[2], q4[0], r11, r8 +; CHECK-NEXT: vmov q3[2], q3[0], r0, r5 +; CHECK-NEXT: vmov q1[3], q1[1], r2, r10 +; CHECK-NEXT: vmov q4[3], q4[1], r11, r8 +; CHECK-NEXT: vand q1, q1, q4 +; CHECK-NEXT: vbic q4, q2, q4 +; CHECK-NEXT: vorr q1, q1, q4 +; CHECK-NEXT: vmov q3[3], q3[1], r1, r7 +; CHECK-NEXT: vmov r2, r3, d2 +; CHECK-NEXT: sbcs r7, r7, #0 +; CHECK-NEXT: vmov r6, r5, d3 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csetm r7, ne +; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: mov.w r0, #-1 +; CHECK-NEXT: vmov q4[2], q4[0], r1, r7 +; CHECK-NEXT: vmov q4[3], q4[1], r1, r7 +; CHECK-NEXT: vand q3, q3, q4 +; CHECK-NEXT: vbic q2, q2, q4 +; CHECK-NEXT: vorr q2, q3, q2 +; CHECK-NEXT: vmov r1, r7, d5 +; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 +; CHECK-NEXT: sbcs.w r2, r0, r3 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 +; CHECK-NEXT: sbcs.w r3, r0, r5 +; CHECK-NEXT: vmov r6, r5, d4 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 +; CHECK-NEXT: sbcs.w r1, r0, r7 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: rsbs.w r7, r6, #-2147483648 +; CHECK-NEXT: sbcs r0, r5 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov.32 q3[1], r0 +; CHECK-NEXT: vmov q3[2], q3[0], r0, r1 +; CHECK-NEXT: vbic q4, q0, q3 +; CHECK-NEXT: vand q2, q2, q3 +; CHECK-NEXT: vmov.32 q3[1], r2 +; CHECK-NEXT: vorr q2, q2, q4 +; CHECK-NEXT: vmov q3[2], q3[0], r2, r3 +; CHECK-NEXT: vbic q0, q0, q3 +; CHECK-NEXT: vand q1, q1, q3 +; CHECK-NEXT: vorr q0, 
q1, q0 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s8 +; CHECK-NEXT: vmov.f32 s3, s10 +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI3_0: +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff +; CHECK-NEXT: .LCPI3_1: +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 0 @ 0x0 +entry: + %conv = fptosi <4 x float> %x to <4 x i64> + %0 = icmp slt <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %1 = icmp sgt <4 x i64> %spec.store.select, + %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @utest_f32i32(<4 x float> %x) { +; CHECK-LABEL: utest_f32i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r6, d9 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: mov r10, r0 +; CHECK-NEXT: mov r0, r6 +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: vmov r5, r0, d8 +; CHECK-NEXT: mov r9, r1 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: mov r4, r1 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: subs.w r3, r7, #-1 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 +; CHECK-NEXT: sbcs r3, r4, #0 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r10, r6 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs.w r1, r6, #-1 +; CHECK-NEXT: sbcs r1, r9, #0 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: subs.w r7, r10, #-1 +; CHECK-NEXT: sbcs r7, r8, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov.32 q2[1], r2 +; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 +; CHECK-NEXT: vand q1, q1, q2 +; CHECK-NEXT: vorn q1, q1, q2 +; CHECK-NEXT: vmov.32 q2[1], r0 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 +; CHECK-NEXT: vand q0, q0, q2 +; CHECK-NEXT: vorn q0, q0, q2 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: vmov.f32 s3, s6 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} +entry: + %conv = fptoui <4 x float> %x to <4 x i64> + %0 = icmp ult <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @ustest_f32i32(<4 x float> %x) { +; CHECK-LABEL: ustest_f32i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} 
+; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r4, r0, d9 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: vmov r4, r2, d8 +; CHECK-NEXT: subs.w r3, r5, #-1 +; CHECK-NEXT: sbcs r3, r6, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: vmov.i64 q5, #0xffffffff +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov q1[2], q1[0], r0, r3 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r3 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vbic q1, q5, q1 +; CHECK-NEXT: vorr q4, q0, q1 +; CHECK-NEXT: vmov r9, r8, d9 +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: subs.w r2, r5, #-1 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: sbcs r2, r6, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov r1, r3, d8 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs.w r6, r9, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 +; CHECK-NEXT: sbcs.w r6, r7, r8 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vbic q1, q5, q1 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: vmov r0, r2, d1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: vmov r5, r4, d0 +; CHECK-NEXT: csetm r6, ne +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: sbcs.w r1, r7, r3 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q2[2], q2[0], r1, r6 +; CHECK-NEXT: vand q2, q4, q2 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: sbcs.w r0, r7, r2 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs r2, r5, #0 +; CHECK-NEXT: sbcs.w r2, r7, r4 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov q1[2], q1[0], r2, r0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s8 +; CHECK-NEXT: vmov.f32 s3, s10 +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <4 x float> %x to <4 x i64> + %0 = icmp slt <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer + %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 
x i32> @stest_f16i32(<4 x half> %x) { +; CHECK-LABEL: stest_f16i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[0] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r9, r0 +; CHECK-NEXT: mvn r0, #-2147483648 +; CHECK-NEXT: subs.w r0, r9, r0 +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mvn r4, #-2147483648 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: csetm r11, ne +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: csetm r10, ne +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmov.u16 r0, q4[2] +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: adr r2, .LCPI6_0 +; CHECK-NEXT: vmov q1[2], q1[0], r9, r7 +; CHECK-NEXT: vldrw.u32 q0, [r2] +; CHECK-NEXT: adr r2, .LCPI6_1 +; CHECK-NEXT: vmov q4[2], q4[0], r11, r10 +; CHECK-NEXT: vldrw.u32 q2, [r2] +; CHECK-NEXT: vmov q1[3], q1[1], r8, r6 +; CHECK-NEXT: vmov q4[3], q4[1], r11, r10 +; CHECK-NEXT: vand q1, q1, q4 +; CHECK-NEXT: vbic q4, q2, q4 +; CHECK-NEXT: mvn r12, #-2147483648 +; CHECK-NEXT: subs.w r2, r4, r12 +; CHECK-NEXT: vorr q1, q1, q4 +; CHECK-NEXT: vmov q3[2], q3[0], r0, r4 +; CHECK-NEXT: vmov r2, r3, d2 +; CHECK-NEXT: sbcs r7, r5, #0 +; CHECK-NEXT: vmov q3[3], q3[1], r1, r5 +; CHECK-NEXT: vmov r6, r5, d3 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csetm r7, ne +; CHECK-NEXT: subs.w r0, r0, r12 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: mov.w r0, #-1 +; CHECK-NEXT: vmov q4[2], q4[0], r1, r7 +; CHECK-NEXT: vmov q4[3], q4[1], r1, r7 +; CHECK-NEXT: vand q3, q3, q4 +; CHECK-NEXT: vbic q2, q2, q4 +; CHECK-NEXT: vorr q2, q3, q2 +; CHECK-NEXT: vmov r1, r7, d5 +; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 +; CHECK-NEXT: sbcs.w r2, r0, r3 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 +; CHECK-NEXT: sbcs.w r3, r0, r5 +; CHECK-NEXT: vmov r6, r5, d4 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 +; CHECK-NEXT: sbcs.w r1, r0, r7 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: rsbs.w r7, r6, #-2147483648 +; CHECK-NEXT: sbcs r0, r5 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov.32 q3[1], r0 +; CHECK-NEXT: vmov q3[2], q3[0], r0, r1 +; CHECK-NEXT: vbic q4, q0, q3 +; CHECK-NEXT: vand q2, q2, q3 +; CHECK-NEXT: vmov.32 q3[1], r2 +; CHECK-NEXT: vorr q2, q2, q4 +; CHECK-NEXT: vmov q3[2], q3[0], r2, r3 +; CHECK-NEXT: vbic q0, q0, q3 +; 
CHECK-NEXT: vand q1, q1, q3 +; CHECK-NEXT: vorr q0, q1, q0 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s8 +; CHECK-NEXT: vmov.f32 s3, s10 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI6_0: +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff +; CHECK-NEXT: .LCPI6_1: +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 0 @ 0x0 +entry: + %conv = fptosi <4 x half> %x to <4 x i64> + %0 = icmp slt <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %1 = icmp sgt <4 x i64> %spec.store.select, + %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @utesth_f16i32(<4 x half> %x) { +; CHECK-LABEL: utesth_f16i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: mov r9, r1 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: subs.w r3, r5, #-1 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: sbcs r3, r7, #0 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r4, r6 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs.w r1, r6, #-1 +; CHECK-NEXT: sbcs r1, r9, #0 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: subs.w r7, r4, #-1 +; CHECK-NEXT: sbcs r7, r8, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov.32 q2[1], r2 +; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 +; CHECK-NEXT: vand q1, q1, q2 +; CHECK-NEXT: vorn q1, q1, q2 +; CHECK-NEXT: vmov.32 q2[1], r0 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 +; CHECK-NEXT: vand q0, q0, q2 +; CHECK-NEXT: vorn q0, q0, q2 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: vmov.f32 s3, s6 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptoui <4 x half> %x to <4 x i64> + %0 = icmp ult <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @ustest_f16i32(<4 x half> %x) { +; CHECK-LABEL: ustest_f16i32: +; CHECK: @ %bb.0: @ %entry +; 
CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vmov.u16 r0, q0[3] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmov.u16 r0, q4[2] +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: subs.w r2, r4, #-1 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: sbcs r2, r5, #0 +; CHECK-NEXT: vmov.i64 q6, #0xffffffff +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vbic q1, q6, q1 +; CHECK-NEXT: vorr q5, q0, q1 +; CHECK-NEXT: vmov r7, r8, d11 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: subs.w r2, r4, #-1 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: sbcs r2, r5, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov r1, r3, d10 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs r7, r7, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 +; CHECK-NEXT: sbcs.w r7, r6, r8 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vbic q1, q6, q1 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: vmov r0, r2, d1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: vmov r5, r4, d0 +; CHECK-NEXT: csetm r7, ne +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: sbcs.w r1, r6, r3 +; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q2[2], q2[0], r1, r7 +; CHECK-NEXT: vand q2, q5, q2 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: sbcs.w r0, r6, r2 +; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs r2, r5, #0 +; CHECK-NEXT: sbcs.w r2, r6, r4 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov q1[2], q1[0], r2, r0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vmov.f32 s1, s2 +; CHECK-NEXT: vmov.f32 s2, s8 +; CHECK-NEXT: vmov.f32 s3, s10 +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptosi <4 x half> %x to <4 x i64> + %0 = icmp slt <4 x i64> %conv, + %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> + %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer + %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +; 
i16 saturate
+
+define arm_aapcs_vfpcc <2 x i16> @stest_f64i16(<2 x double> %x) {
+; CHECK-LABEL: stest_f64i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
+; CHECK-NEXT:    movw r4, #32767
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r5
+; CHECK-NEXT:    adr.w r12, .LCPI9_0
+; CHECK-NEXT:    vmov r1, r2, d1
+; CHECK-NEXT:    vldrw.u32 q1, [r12]
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    movw lr, #32768
+; CHECK-NEXT:    movt lr, #65535
+; CHECK-NEXT:    mov.w r12, #-1
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    subs r1, r1, r4
+; CHECK-NEXT:    sbcs r1, r2, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    subs r2, r3, r4
+; CHECK-NEXT:    sbcs r2, r5, #0
+; CHECK-NEXT:    adr r4, .LCPI9_1
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vldrw.u32 q1, [r4]
+; CHECK-NEXT:    vmov r1, r2, d1
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    subs.w r1, lr, r1
+; CHECK-NEXT:    sbcs.w r1, r12, r2
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r1
+; CHECK-NEXT:    vmov q2[3], q2[1], r0, r1
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 32767 @ 0x7fff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 32767 @ 0x7fff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:  .LCPI9_1:
+; CHECK-NEXT:    .long 4294934528 @ 0xffff8000
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 4294934528 @ 0xffff8000
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %conv = fptosi <2 x double> %x to <2 x i32>
+  %0 = icmp slt <2 x i32> %conv, <i32 32767, i32 32767>
+  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>
+  %1 = icmp sgt <2 x i32> %spec.store.select, <i32 -32768, i32 -32768>
+  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>
+  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
+  ret <2 x i16> %conv6
+}
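+
+; The <2 x double> cases cannot use the MVE conversion instructions
+; (MVE.fp only covers f16 and f32 vectors), so each lane is converted
+; through the __aeabi_d2lz/__aeabi_d2ulz libcalls and the clamp is
+; carried out with scalar compares before the lanes are reassembled.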
+
+define arm_aapcs_vfpcc <2 x i16> @utest_f64i16(<2 x double> %x) {
+; CHECK-LABEL: utest_f64i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
+; CHECK-NEXT:    movw r4, #65535
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r5
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    vmov r0, r1, d1
+; CHECK-NEXT:    vmov.i64 q1, #0xffff
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    subs r0, r0, r4
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs r1, r2, r4
+; CHECK-NEXT:    sbcs r1, r3, #0
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %conv = fptoui <2 x double> %x to <2 x i32>
+  %0 = icmp ult <2 x i32> %conv, <i32 65535, i32 65535>
+  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
+  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16>
+  ret <2 x i16> %conv6
+}
+
+define arm_aapcs_vfpcc <2 x i16> @ustest_f64i16(<2 x double> %x) {
+; CHECK-LABEL: ustest_f64i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
+; CHECK-NEXT:    movw r4, #65535
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r5
+; CHECK-NEXT:    vmov.i64 q1, #0xffff
+; CHECK-NEXT:    vmov r1, r2, d1
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    subs r1, r1, r4
+; CHECK-NEXT:    sbcs r1, r2, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    subs r2, r3, r4
+; CHECK-NEXT:    sbcs r2, r5, #0
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov r1, r2, d1
+; CHECK-NEXT:    vmov r3, r5, d0
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    sbcs.w r1, r0, r2
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    rsbs r2, r3, #0
+; CHECK-NEXT:    sbcs.w r2, r0, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q1[2], q1[0], r0, r1
+; CHECK-NEXT:    vmov q1[3], q1[1], r0, r1
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %conv = fptosi <2 x double> %x to <2 x i32>
+  %0 = icmp slt <2 x i32> %conv, <i32 65535, i32 65535>
+  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
+  %1 = icmp sgt <2 x i32> %spec.store.select, zeroinitializer
+  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> zeroinitializer
+  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
+  ret <2 x i16> %conv6
+}
+
+define arm_aapcs_vfpcc <4 x i16> @stest_f32i16(<4 x float> %x) {
+; CHECK-LABEL: stest_f32i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    vqmovnb.s32 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptosi <4 x float> %x to <4 x i32>
+  %0 = icmp slt <4 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %1 = icmp sgt <4 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
+  ret <4 x i16> %conv6
+}
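+
+; For f32->i16 the whole clamp folds into the conversion: vcvt.s32.f32
+; followed by vqmovnb.s32 is itself the saturating narrow, and the
+; trailing vmovlb merely re-extends the lanes into the
+; <4 x i16>-in-<4 x i32> container layout.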
+
+define arm_aapcs_vfpcc <4 x i16> @utest_f32i16(<4 x float> %x) {
+; CHECK-LABEL: utest_f32i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0
+; CHECK-NEXT:    vqmovnb.u32 q0, q0
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptoui <4 x float> %x to <4 x i32>
+  %0 = icmp ult <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
+  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
+  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16>
+  ret <4 x i16> %conv6
+}
+
+define arm_aapcs_vfpcc <4 x i16> @ustest_f32i16(<4 x float> %x) {
+; CHECK-LABEL: ustest_f32i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q1, #0xffff
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmin.s32 q0, q0, q1
+; CHECK-NEXT:    vmax.s32 q0, q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptosi <4 x float> %x to <4 x i32>
+  %0 = icmp slt <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
+  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
+  %1 = icmp sgt <4 x i32> %spec.store.select, zeroinitializer
+  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> zeroinitializer
+  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
+  ret <4 x i16> %conv6
+}
+
+define arm_aapcs_vfpcc <8 x i16> @stest_f16i16(<8 x half> %x) {
+; CHECK-LABEL: stest_f16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vmovx.f16 s12, s2
+; CHECK-NEXT:    vmovx.f16 s10, s3
+; CHECK-NEXT:    vcvt.s32.f16 s14, s3
+; CHECK-NEXT:    vcvt.s32.f16 s2, s2
+; CHECK-NEXT:    vcvt.s32.f16 s10, s10
+; CHECK-NEXT:    vcvt.s32.f16 s12, s12
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmovx.f16 s6, s0
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r1
+; CHECK-NEXT:    vcvt.s32.f16 s8, s1
+; CHECK-NEXT:    vcvt.s32.f16 s0, s0
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvt.s32.f16 s4, s4
+; CHECK-NEXT:    vmov q4[3], q4[1], r2, r1
+; CHECK-NEXT:    vcvt.s32.f16 s6, s6
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vqmovnb.s32 q3, q4
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vstrh.32 q3, [r0, #8]
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
+; CHECK-NEXT:    vqmovnb.s32 q0, q0
+; CHECK-NEXT:    vstrh.32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptosi <8 x half> %x to <8 x i32>
+  %0 = icmp slt <8 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %1 = icmp sgt <8 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
+  ret <8 x i16> %conv6
+}
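+
+; The <8 x half> cases convert one lane at a time with scalar
+; vcvt.s32.f16/vcvt.u32.f16 and then reuse the same saturating-narrow
+; sequence, staging the two <4 x i32> halves through a stack slot
+; (vstrh.32/vldrw.u32) to assemble the final <8 x i16> result.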
+
+define arm_aapcs_vfpcc <8 x i16> @utesth_f16i16(<8 x half> %x) {
+; CHECK-LABEL: utesth_f16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vmovx.f16 s12, s2
+; CHECK-NEXT:    vmovx.f16 s10, s3
+; CHECK-NEXT:    vcvt.u32.f16 s14, s3
+; CHECK-NEXT:    vcvt.u32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f16 s10, s10
+; CHECK-NEXT:    vcvt.u32.f16 s12, s12
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmovx.f16 s8, s1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vcvt.u32.f16 s6, s0
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r1
+; CHECK-NEXT:    vcvt.u32.f16 s4, s1
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvt.u32.f16 s8, s8
+; CHECK-NEXT:    vmov q4[3], q4[1], r2, r1
+; CHECK-NEXT:    vcvt.u32.f16 s0, s0
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vqmovnb.u32 q3, q4
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmovlb.u16 q3, q3
+; CHECK-NEXT:    vmov q1[3], q1[1], r2, r1
+; CHECK-NEXT:    vstrh.32 q3, [r0, #8]
+; CHECK-NEXT:    vqmovnb.u32 q0, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vstrh.32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptoui <8 x half> %x to <8 x i32>
+  %0 = icmp ult <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16>
+  ret <8 x i16> %conv6
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ustest_f16i16(<8 x half> %x) {
+; CHECK-LABEL: ustest_f16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vcvt.s32.f16 s10, s0
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vcvt.s32.f16 s14, s0
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vcvt.s32.f16 s9, s0
+; CHECK-NEXT:    vmovx.f16 s0, s2
+; CHECK-NEXT:    vcvt.s32.f16 s13, s3
+; CHECK-NEXT:    vcvt.s32.f16 s15, s2
+; CHECK-NEXT:    vcvt.s32.f16 s11, s0
+; CHECK-NEXT:    vmov r1, s13
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r1
+; CHECK-NEXT:    vcvt.s32.f16 s8, s1
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    vcvt.s32.f16 s12, s4
+; CHECK-NEXT:    vmov r2, s11
+; CHECK-NEXT:    vmov.i32 q0, #0xffff
+; CHECK-NEXT:    vmov q4[3], q4[1], r2, r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmin.s32 q4, q4, q0
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmin.s32 q0, q2, q0
+; CHECK-NEXT:    vmax.s32 q4, q4, q1
+; CHECK-NEXT:    vmax.s32 q0, q0, q1
+; CHECK-NEXT:    vstrh.32 q4, [r0, #8]
+; CHECK-NEXT:    vstrh.32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %conv = fptosi <8 x half> %x to <8 x i32>
+  %0 = icmp slt <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %1 = icmp sgt <8 x i32> %spec.store.select, zeroinitializer
+  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> zeroinitializer
+  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
+  ret <8 x i16> %conv6
+}
+
+; i64 saturate
+
+define arm_aapcs_vfpcc <2 x i64> @stest_f64i64(<2 x double> %x) {
+; CHECK-LABEL: stest_f64i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    mvn r9, #-2147483648
+; 
CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: subs.w r7, r0, #-1 +; CHECK-NEXT: mov.w r5, #-1 +; CHECK-NEXT: sbcs.w r7, r1, r9 +; CHECK-NEXT: mov.w r10, #-2147483648 +; CHECK-NEXT: sbcs r7, r2, #0 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r11, r0, r5, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: csel r1, r1, r9, ne +; CHECK-NEXT: rsbs.w r0, r11, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: sbcs.w r0, r10, r1 +; CHECK-NEXT: sbcs.w r0, r5, r2 +; CHECK-NEXT: sbcs.w r0, r5, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r8, r1, r10, ne +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: subs.w r6, r0, #-1 +; CHECK-NEXT: sbcs.w r6, r1, r9 +; CHECK-NEXT: sbcs r6, r2, #0 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: csel r1, r1, r9, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: csel r2, r2, r6, ne +; CHECK-NEXT: rsbs r6, r0, #0 +; CHECK-NEXT: sbcs.w r6, r10, r1 +; CHECK-NEXT: sbcs.w r2, r5, r2 +; CHECK-NEXT: sbcs.w r2, r5, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r10, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r11, r7, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r8 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %1 = icmp sgt <2 x i128> %spec.store.select, + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utest_f64i64(<2 x double> %x) { +; CHECK-LABEL: utest_f64i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __fixunsdfti +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: vmov r4, r1, d8 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: csel r5, r0, r7, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __fixunsdfti +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r8, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptoui <2 x double> %x to <2 x i128> + %0 = icmp ult <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> 
@ustest_f64i64(<2 x double> %x) { +; CHECK-LABEL: ustest_f64i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: vmov r12, lr, d8 +; CHECK-NEXT: subs r5, r2, #1 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mov.w r8, #1 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: csel r5, r1, r6, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: rsbs r1, r0, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: sbcs.w r1, r4, r5 +; CHECK-NEXT: sbcs.w r1, r4, r2 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: sbcs.w r1, r4, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r9, r0, r6, ne +; CHECK-NEXT: mov r0, r12 +; CHECK-NEXT: mov r1, lr +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: subs r7, r2, #1 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r1, r1, r7, ne +; CHECK-NEXT: rsbs r7, r0, #0 +; CHECK-NEXT: sbcs.w r7, r4, r1 +; CHECK-NEXT: sbcs.w r2, r4, r2 +; CHECK-NEXT: sbcs.w r2, r4, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r2, r5, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @stest_f32i64(<2 x float> %x) { +; CHECK-LABEL: stest_f32i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vmov r8, r0, d0 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs.w r7, r0, #-1 +; CHECK-NEXT: mvn r10, #-2147483648 +; CHECK-NEXT: sbcs.w r7, r1, r10 +; CHECK-NEXT: mov.w r11, #-2147483648 +; CHECK-NEXT: sbcs r7, r2, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: mov.w r7, #-1 +; CHECK-NEXT: csel r1, r1, r10, ne +; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: rsbs.w r0, r9, #0 +; CHECK-NEXT: sbcs.w r0, r11, r1 +; CHECK-NEXT: sbcs.w r0, r7, r2 +; CHECK-NEXT: sbcs.w r0, r7, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r1, r11, ne +; CHECK-NEXT: str 
r0, [sp] @ 4-byte Spill +; CHECK-NEXT: mov r0, r8 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs.w r6, r0, #-1 +; CHECK-NEXT: sbcs.w r6, r1, r10 +; CHECK-NEXT: sbcs r6, r2, #0 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r1, r1, r10, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: csel r2, r2, r6, ne +; CHECK-NEXT: rsbs r6, r0, #0 +; CHECK-NEXT: sbcs.w r6, r11, r1 +; CHECK-NEXT: sbcs.w r2, r7, r2 +; CHECK-NEXT: sbcs.w r2, r7, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r11, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r2, r9, r4, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x float> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %1 = icmp sgt <2 x i128> %spec.store.select, + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utest_f32i64(<2 x float> %x) { +; CHECK-LABEL: utest_f32i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: vmov r4, r0, d0 +; CHECK-NEXT: bl __fixunssfti +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: subs r1, r2, #1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r5, r0, r7, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: movs r6, #0 +; CHECK-NEXT: bl __fixunssfti +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r8, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptoui <2 x float> %x to <2 x i128> + %0 = icmp ult <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @ustest_f32i64(<2 x float> %x) { +; CHECK-LABEL: ustest_f32i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vmov r4, r0, d0 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs r7, r2, #1 +; CHECK-NEXT: mov.w r9, #1 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r6, r1, r7, ne +; CHECK-NEXT: csel r2, r2, r9, ne +; CHECK-NEXT: rsbs r1, r0, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: sbcs.w r1, r5, r6 +; CHECK-NEXT: sbcs.w r1, r5, r2 +; CHECK-NEXT: sbcs.w r1, r5, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: 
movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r8, r0, r7, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs r4, r2, #1 +; CHECK-NEXT: sbcs r4, r3, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: csel r2, r2, r9, ne +; CHECK-NEXT: csel r3, r3, r4, ne +; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: rsbs r4, r0, #0 +; CHECK-NEXT: sbcs.w r4, r5, r1 +; CHECK-NEXT: sbcs.w r2, r5, r2 +; CHECK-NEXT: sbcs.w r2, r5, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r6, r7, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r8 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <2 x float> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @stest_f16i64(<2 x half> %x) { +; CHECK-LABEL: stest_f16i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: subs.w r7, r0, #-1 +; CHECK-NEXT: mvn r9, #-2147483648 +; CHECK-NEXT: sbcs.w r7, r1, r9 +; CHECK-NEXT: mov.w r6, #-1 +; CHECK-NEXT: sbcs r7, r2, #0 +; CHECK-NEXT: mov.w r10, #-2147483648 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r11, r0, r6, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: csel r1, r1, r9, ne +; CHECK-NEXT: rsbs.w r0, r11, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: sbcs.w r0, r10, r1 +; CHECK-NEXT: sbcs.w r0, r6, r2 +; CHECK-NEXT: sbcs.w r0, r6, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: csel r8, r1, r10, ne +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: subs.w r5, r0, #-1 +; CHECK-NEXT: sbcs.w r5, r1, r9 +; CHECK-NEXT: sbcs r5, r2, #0 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: csel r1, r1, r9, ne +; CHECK-NEXT: csel r3, r3, r5, ne +; CHECK-NEXT: csel r2, r2, r5, ne +; CHECK-NEXT: rsbs r5, r0, #0 +; CHECK-NEXT: sbcs.w r5, r10, r1 +; CHECK-NEXT: sbcs.w r2, r6, r2 +; CHECK-NEXT: sbcs.w r2, r6, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r10, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r11, r7, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r8 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, 
r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x half> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %1 = icmp sgt <2 x i128> %spec.store.select, + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utesth_f16i64(<2 x half> %x) { +; CHECK-LABEL: utesth_f16i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, lr} +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixunshfti +; CHECK-NEXT: mov r4, r1 +; CHECK-NEXT: subs r1, r2, #1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r7, r0, r6, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: bl __fixunshfti +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r2, r4, r6, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} +entry: + %conv = fptoui <2 x half> %x to <2 x i128> + %0 = icmp ult <2 x i128> %conv, + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> + %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @ustest_f16i64(<2 x half> %x) { +; CHECK-LABEL: ustest_f16i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: subs r7, r2, #1 +; CHECK-NEXT: mov.w r8, #1 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: csel r5, r1, r7, ne +; CHECK-NEXT: rsbs r1, r0, #0 +; CHECK-NEXT: sbcs.w r1, r4, r5 +; CHECK-NEXT: sbcs.w r1, r4, r2 +; CHECK-NEXT: sbcs.w r1, r4, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r9, r0, r6, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: subs r7, r2, #1 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r1, r1, r7, ne +; CHECK-NEXT: rsbs r7, r0, #0 +; CHECK-NEXT: sbcs.w r7, r4, r1 +; CHECK-NEXT: sbcs.w r2, r4, r2 +; CHECK-NEXT: sbcs.w r2, r4, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: 
csel r2, r5, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <2 x half> %x to <2 x i128> + %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> + %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> + %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer + %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + + + +; i32 saturate + +define arm_aapcs_vfpcc <2 x i32> @stest_f64i32_mm(<2 x double> %x) { +; CHECK-LABEL: stest_f64i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: mov.w r4, #-2147483648 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: mvn r5, #-2147483648 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp r0, r5 +; CHECK-NEXT: csel r0, r0, r5, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r1, r8, mi +; CHECK-NEXT: cmp.w r6, #-1 +; CHECK-NEXT: cset r1, gt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r3, r0, r4, hi +; CHECK-NEXT: adds r0, r6, #1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: cset r7, eq +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r7, r3, r2, ne +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp r0, r5 +; CHECK-NEXT: csel r0, r0, r5, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r8, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r4, hi +; CHECK-NEXT: adds r3, r1, #1 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: orr.w r1, r1, r1, asr #31 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: orr.w r2, r6, r6, asr #31 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i64> + %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>) + %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>) + %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32> + ret <2 x i32> %conv6 +} + +define arm_aapcs_vfpcc <2 x i32> @utest_f64i32_mm(<2 x double> %x) { +; CHECK-LABEL: utest_f64i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: vmov r2, r3, d8 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT:
cset r1, eq +; CHECK-NEXT: mov.w r4, #-1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vldr s17, .LCPI28_0 +; CHECK-NEXT: csel r5, r0, r4, ne +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: mov r1, r3 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov s18, r5 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: vmov.f32 s19, s17 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: vmov s16, r0 +; CHECK-NEXT: vmov q0, q4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI28_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 +entry: + %conv = fptoui <2 x double> %x to <2 x i64> + %spec.store.select = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>) + %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32> + ret <2 x i32> %conv6 +} + +define arm_aapcs_vfpcc <2 x i32> @ustest_f64i32_mm(<2 x double> %x) { +; CHECK-LABEL: ustest_f64i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: vmov r2, r12, d8 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, mi +; CHECK-NEXT: mov.w r5, #-1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: vldr s17, .LCPI29_0 +; CHECK-NEXT: csel r3, r0, r5, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r4, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: csel r0, r0, r3, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r4, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, gt +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r3, r0, r3, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r3, ne +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: mov r1, r12 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov s18, r6 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: vmov.f32 s19, s17 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r4, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov s16, r0 +; CHECK-NEXT: vmov q0, q4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI29_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 +entry: + %conv = fptosi <2 x double> %x to <2 x i64> + %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>) + %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> zeroinitializer) + %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32> + ret <2 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @stest_f32i32_mm(<4 x float> %x) { +; CHECK-LABEL: stest_f32i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: mov.w r6, #-2147483648 +; CHECK-NEXT: vmov r0, r10, d9
+; CHECK-NEXT: mvn r7, #-2147483648 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r7, ne +; CHECK-NEXT: cmp r0, r7 +; CHECK-NEXT: csel r0, r0, r7, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r8, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r6, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r3, r0, r6, hi +; CHECK-NEXT: adds r0, r1, #1 +; CHECK-NEXT: vmov r0, r11, d8 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r9, r3, r2, ne +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r7, ne +; CHECK-NEXT: cmp r0, r7 +; CHECK-NEXT: csel r0, r0, r7, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r8, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r6, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r6, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r4, r0, r2, ne +; CHECK-NEXT: mov r0, r10 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r7, ne +; CHECK-NEXT: cmp r0, r7 +; CHECK-NEXT: csel r0, r0, r7, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r8, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r6, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r6, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r5, r0, r2, ne +; CHECK-NEXT: mov r0, r11 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r4, r9 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r7, ne +; CHECK-NEXT: cmp r0, r7 +; CHECK-NEXT: csel r0, r0, r7, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r8, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r6, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r6, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <4 x float> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>) + %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>) + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @utest_f32i32_mm(<4 x float> %x) { +; CHECK-LABEL: utest_f32i32_mm: +; CHECK: @ %bb.0: @ %entry +;
CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r4, d9 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: vmov r2, r5, d8 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: mov.w r6, #-1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r8, r0, r6, ne +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r7, r0, r6, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r4, r0, r6, ne +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: bl __aeabi_f2ulz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r7, r8 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptoui <4 x float> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) + %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @ustest_f32i32_mm(<4 x float> %x) { +; CHECK-LABEL: ustest_f32i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: mov.w r9, #-1 +; CHECK-NEXT: vmov r0, r4, d9 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r9, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r1, r7, mi +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: cset r1, gt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r2, r1, ne +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vmov r0, r5, d8 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r8, r2, r1, ne +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r9, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r2, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r9, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r4, r0, r2, ne +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: vmov q0[2], q0[0], r6, r8 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r9, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <4 x float> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) + %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @stest_f16i32_mm(<4 x half> %x) { +; CHECK-LABEL: stest_f16i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mvn r4, #-2147483648 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: mov.w r9, #0 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: mov.w r5, #-2147483648 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r0, r4 +; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r9, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r5, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r8, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r0, r4 +; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r9, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r5, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r7, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r0, r4 +; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r9, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r5, hi +; CHECK-NEXT: adds r1, #1 +;
CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r7, r8 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r0, r4 +; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r9, mi +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r5, ne +; CHECK-NEXT: cmp.w r0, #-2147483648 +; CHECK-NEXT: csel r0, r0, r5, hi +; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r6 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <4 x half> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>) + %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>) + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @utesth_f16i32_mm(<4 x half> %x) { +; CHECK-LABEL: utesth_f16i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, lr} +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mov.w r4, #-1 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r5, r0, r4, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r4, ne +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r7, r0, r4, ne +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r6, r5 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r7 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} +entry: + %conv = fptoui <4 x half> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) + %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @ustest_f16i32_mm(<4 x half> %x) { +; CHECK-LABEL: ustest_f16i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: mov.w r4, #-1 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r5, mi +; CHECK-NEXT: cmp r1, #0 +;
CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r8, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r5, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r7, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r5, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r2, ne +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r7, r8 +; CHECK-NEXT: cset r2, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r4, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r1, r5, mi +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r2, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[3], q0[1], r0, r6 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +entry: + %conv = fptosi <4 x half> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) + %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +; i16 saturate + +define arm_aapcs_vfpcc <2 x i16> @stest_f64i16_mm(<2 x double> %x) { +; CHECK-LABEL: stest_f64i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: movw r2, #32767 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: vmov r1, r3, d1 +; CHECK-NEXT: movw lr, #32768 +; CHECK-NEXT: vmov r0, r6, d0 +; CHECK-NEXT: movt lr, #65535 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r5, mi +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r5, r1, r2, ne +; CHECK-NEXT: cmp r1, r2 +; CHECK-NEXT: csel r1, r1, r2, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r4, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r5, r1, r5, ne +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r1, r3, r12, mi +; CHECK-NEXT:
cmp.w r1, #-1 +; CHECK-NEXT: cset r3, gt +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r3, r5, lr, ne +; CHECK-NEXT: cmn.w r5, #32768 +; CHECK-NEXT: csel r5, r5, lr, hi +; CHECK-NEXT: adds r4, r1, #1 +; CHECK-NEXT: cset r4, eq +; CHECK-NEXT: orr.w r1, r1, r1, asr #31 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r3, r5, r3, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r4, mi +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r4, r0, r2, ne +; CHECK-NEXT: cmp r0, r2 +; CHECK-NEXT: csel r0, r0, r2, lo +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r2, eq +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r2, r6, r12, mi +; CHECK-NEXT: cmp.w r2, #-1 +; CHECK-NEXT: cset r6, gt +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r6, r0, lr, ne +; CHECK-NEXT: cmn.w r0, #32768 +; CHECK-NEXT: csel r0, r0, lr, hi +; CHECK-NEXT: adds r5, r2, #1 +; CHECK-NEXT: cset r5, eq +; CHECK-NEXT: orr.w r2, r2, r2, asr #31 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r3 +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r6, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i32> + %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>) + %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>) + %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> + ret <2 x i16> %conv6 +} + +define arm_aapcs_vfpcc <2 x i16> @utest_f64i16_mm(<2 x double> %x) { +; CHECK-LABEL: utest_f64i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: movw r5, #65535 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: cmp r0, r5 +; CHECK-NEXT: csel r0, r0, r5, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: cset r4, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r2, r5 +; CHECK-NEXT: csel r2, r2, r5, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r4, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r2, r2, r5, ne +; CHECK-NEXT: vmov q0[2], q0[0], r2, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r7, pc} +entry: + %conv = fptoui <2 x double> %x to <2 x i32> + %spec.store.select = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>) + %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16> + ret <2 x i16> %conv6 +} + +define arm_aapcs_vfpcc <2 x i16> @ustest_f64i16_mm(<2 x double> %x) { +; CHECK-LABEL: ustest_f64i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: movw r4, #65535 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: vmov r1, r0, d0 +; CHECK-NEXT: vldr s1, .LCPI38_0 +;
CHECK-NEXT: vmov.f32 s3, s1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r5, mi +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r12, r2, r4, ne +; CHECK-NEXT: cmp r2, r4 +; CHECK-NEXT: csel r2, r2, r4, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r5, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r2, r2, r12, ne +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r3, r3, r12, mi +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r5, gt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r5, r2, r5, ne +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r2, r5, ne +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: cset r3, mi +; CHECK-NEXT: vmov s2, r2 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r3, r1, r4, ne +; CHECK-NEXT: cmp r1, r4 +; CHECK-NEXT: csel r1, r1, r4, lo +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: cset r5, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r3, ne +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r0, r0, r12, mi +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: cset r3, gt +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r3, r1, r3, ne +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: cset r0, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r0, r1, r3, ne +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI38_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 +entry: + %conv = fptosi <2 x double> %x to <2 x i32> + %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>) + %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> zeroinitializer) + %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> + ret <2 x i16> %conv6 +} + +define arm_aapcs_vfpcc <4 x i16> @stest_f32i16_mm(<4 x float> %x) { +; CHECK-LABEL: stest_f32i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvt.s32.f32 q0, q0 +; CHECK-NEXT: vqmovnb.s32 q0, q0 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %conv = fptosi <4 x float> %x to <4 x i32> + %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>) + %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>) + %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> + ret <4 x i16> %conv6 +} + +define arm_aapcs_vfpcc <4 x i16> @utest_f32i16_mm(<4 x float> %x) { +; CHECK-LABEL: utest_f32i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvt.u32.f32 q0, q0 +; CHECK-NEXT: vqmovnb.u32 q0, q0 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %conv = fptoui <4 x float> %x to <4 x i32> + %spec.store.select = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>) + %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16> + ret <4 x i16> %conv6 +} + +define arm_aapcs_vfpcc <4 x i16> @ustest_f32i16_mm(<4 x float> %x) { +; CHECK-LABEL: ustest_f32i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.i32 q1, #0xffff +; CHECK-NEXT: vcvt.s32.f32 q0, q0 +; CHECK-NEXT: vmov.i32 q2, #0x0 +; CHECK-NEXT: vmin.s32 q0, q0, q1 +; CHECK-NEXT: vmax.s32 q0, q0, q2 +; CHECK-NEXT: bx lr +entry: + %conv = fptosi <4 x float> %x to <4 x i32> + %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>) + %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer) + %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> + ret <4 x i16> %conv6 +} + +define arm_aapcs_vfpcc <8 x i16>
@stest_f16i16_mm(<8 x half> %x) { +; CHECK-LABEL: stest_f16i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vmovx.f16 s12, s2 +; CHECK-NEXT: vmovx.f16 s10, s3 +; CHECK-NEXT: vcvt.s32.f16 s14, s3 +; CHECK-NEXT: vcvt.s32.f16 s2, s2 +; CHECK-NEXT: vcvt.s32.f16 s10, s10 +; CHECK-NEXT: vcvt.s32.f16 s12, s12 +; CHECK-NEXT: vmov r1, s14 +; CHECK-NEXT: vmovx.f16 s6, s0 +; CHECK-NEXT: vmov r2, s2 +; CHECK-NEXT: vmovx.f16 s4, s1 +; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vcvt.s32.f16 s8, s1 +; CHECK-NEXT: vcvt.s32.f16 s0, s0 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: vmov r2, s12 +; CHECK-NEXT: vcvt.s32.f16 s4, s4 +; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vcvt.s32.f16 s6, s6 +; CHECK-NEXT: vmov r1, s8 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vmov r2, s0 +; CHECK-NEXT: vqmovnb.s32 q3, q4 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r1 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: vstrh.32 q3, [r0, #8] +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: vqmovnb.s32 q0, q0 +; CHECK-NEXT: vstrh.32 q0, [r0] +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %conv = fptosi <8 x half> %x to <8 x i32> + %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>) + %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>) + %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> + ret <8 x i16> %conv6 +} + +define arm_aapcs_vfpcc <8 x i16> @utesth_f16i16_mm(<8 x half> %x) { +; CHECK-LABEL: utesth_f16i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vmovx.f16 s12, s2 +; CHECK-NEXT: vmovx.f16 s10, s3 +; CHECK-NEXT: vcvt.u32.f16 s14, s3 +; CHECK-NEXT: vcvt.u32.f16 s2, s2 +; CHECK-NEXT: vcvt.u32.f16 s10, s10 +; CHECK-NEXT: vcvt.u32.f16 s12, s12 +; CHECK-NEXT: vmov r1, s14 +; CHECK-NEXT: vmovx.f16 s8, s1 +; CHECK-NEXT: vmov r2, s2 +; CHECK-NEXT: vcvt.u32.f16 s6, s0 +; CHECK-NEXT: vmovx.f16 s0, s0 +; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vcvt.u32.f16 s4, s1 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: vmov r2, s12 +; CHECK-NEXT: vcvt.u32.f16 s8, s8 +; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vcvt.u32.f16 s0, s0 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vqmovnb.u32 q3, q4 +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vmov q1[2], q1[0], r2, r1 +; CHECK-NEXT: vmov r1, s8 +; CHECK-NEXT: vmov r2, s0 +; CHECK-NEXT: vmovlb.u16 q3, q3 +; CHECK-NEXT: vmov q1[3], q1[1], r2, r1 +; CHECK-NEXT: vstrh.32 q3, [r0, #8] +; CHECK-NEXT: vqmovnb.u32 q0, q1 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vstrh.32 q0, [r0] +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %conv = fptoui <8 x half> %x to <8 x i32> + %spec.store.select = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>) + %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16> + ret <8 x i16> %conv6 +} + +define arm_aapcs_vfpcc <8 x i16> @ustest_f16i16_mm(<8 x half> %x) { +; CHECK-LABEL: ustest_f16i16_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vcvt.s32.f16 s10, s0 +; CHECK-NEXT: vmovx.f16 s0, s0 +; CHECK-NEXT: vcvt.s32.f16 s14, s0
+; CHECK-NEXT: vmovx.f16 s0, s3 +; CHECK-NEXT: vcvt.s32.f16 s9, s0 +; CHECK-NEXT: vmovx.f16 s0, s2 +; CHECK-NEXT: vcvt.s32.f16 s13, s3 +; CHECK-NEXT: vcvt.s32.f16 s15, s2 +; CHECK-NEXT: vcvt.s32.f16 s11, s0 +; CHECK-NEXT: vmov r1, s13 +; CHECK-NEXT: vmov r2, s15 +; CHECK-NEXT: vmovx.f16 s4, s1 +; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vcvt.s32.f16 s8, s1 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vcvt.s32.f16 s12, s4 +; CHECK-NEXT: vmov r2, s11 +; CHECK-NEXT: vmov.i32 q0, #0xffff +; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vmov r1, s8 +; CHECK-NEXT: vmov r2, s10 +; CHECK-NEXT: vmov.i32 q1, #0x0 +; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 +; CHECK-NEXT: vmov r1, s12 +; CHECK-NEXT: vmov r2, s14 +; CHECK-NEXT: vmin.s32 q4, q4, q0 +; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vmin.s32 q0, q2, q0 +; CHECK-NEXT: vmax.s32 q4, q4, q1 +; CHECK-NEXT: vmax.s32 q0, q0, q1 +; CHECK-NEXT: vstrh.32 q4, [r0, #8] +; CHECK-NEXT: vstrh.32 q0, [r0] +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %conv = fptosi <8 x half> %x to <8 x i32> + %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>) + %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> zeroinitializer) + %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> + ret <8 x i16> %conv6 +} + +; i64 saturate + +define arm_aapcs_vfpcc <2 x i64> @stest_f64i64_mm(<2 x double> %x) { +; CHECK-LABEL: stest_f64i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #24 +; CHECK-NEXT: sub sp, #24 +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: mov r10, r0 +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: csel r1, r3, r0, mi +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r0, r2, r0, asr #31 +; CHECK-NEXT: mvn r11, #-2147483648 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: cmp r6, r11 +; CHECK-NEXT: mov r5, r3 +; CHECK-NEXT: add.w r3, r0, #1 +; CHECK-NEXT: csel r0, r6, r11, lo +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r6, r11, mi +; CHECK-NEXT: orrs r2, r5 +; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill +; CHECK-NEXT: csel r8, r0, r7, eq +; CHECK-NEXT: mov.w r2, #-2147483648 +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r0, r8, r2, gt +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill +; CHECK-NEXT: csel r1, r8, r2, hi +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov.w r9, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill +; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: cmp r1, r11 +; CHECK-NEXT: mov lr, r0 +; CHECK-NEXT: csel r7, r1, r11, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: csel r4, r1, r11, mi +; CHECK-NEXT: orrs r3, r2 +; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: csel r7, r7, r4, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r4, r0, r9, mi +; CHECK-NEXT: mov.w r3, #-2147483648 +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r9, r7, r3, gt +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r12, r7, r3, hi +;
CHECK-NEXT: mov r3, r0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r3, r2, r3, asr #31 +; CHECK-NEXT: and.w r2, r3, r4 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: adds r2, #1 +; CHECK-NEXT: str r2, [sp] @ 4-byte Spill +; CHECK-NEXT: csel r12, r12, r9, eq +; CHECK-NEXT: cmp r6, r11 +; CHECK-NEXT: csel r6, r10, r3, lo +; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; CHECK-NEXT: csel r6, r10, r6, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r5, r10, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; CHECK-NEXT: csel r6, r5, r8, hi +; CHECK-NEXT: csel r6, r5, r6, eq +; CHECK-NEXT: cmp.w r2, #-1 +; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; CHECK-NEXT: csel r5, r5, r8, gt +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp r1, r11 +; CHECK-NEXT: csel r1, lr, r3, lo +; CHECK-NEXT: csel r1, lr, r1, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r0, lr, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r1, r0, r8, hi +; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload +; CHECK-NEXT: csel r1, r0, r1, eq +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r0, r0, r8, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload +; CHECK-NEXT: vmov q0[3], q0[1], r12, r0 +; CHECK-NEXT: add sp, #24 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>) + %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>) + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utest_f64i64_mm(<2 x double> %x) { +; CHECK-LABEL: utest_f64i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __fixunsdfti +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: vmov r4, r1, d8 +; CHECK-NEXT: eor r7, r2, #1 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: orr.w r7, r7, r3 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __fixunsdfti +; CHECK-NEXT: eor r4, r2, #1 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: orr.w r4, r4, r3 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r2, r8, r5, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0,
r9 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptoui <2 x double> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>) + %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @ustest_f64i64_mm(<2 x double> %x) { +; CHECK-LABEL: ustest_f64i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: subs r6, r2, #1 +; CHECK-NEXT: eor r7, r2, #1 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: orr.w r7, r7, r3 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: mov.w r10, #1 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r11, r1, r7, ne +; CHECK-NEXT: movs r4, #0 +; CHECK-NEXT: cmp.w r11, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: csel r1, r0, r11, ne +; CHECK-NEXT: csel r6, r0, r1, eq +; CHECK-NEXT: cmp r2, #1 +; CHECK-NEXT: csel r1, r2, r10, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r2, r10, mi +; CHECK-NEXT: csel r1, r1, r2, eq +; CHECK-NEXT: csel r2, r3, r4, mi +; CHECK-NEXT: rsbs r3, r1, #0 +; CHECK-NEXT: sbcs.w r3, r4, r2 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r3, r0, r7, ne +; CHECK-NEXT: orrs.w r9, r1, r2 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: csel r8, r6, r3, eq +; CHECK-NEXT: bl __fixdfti +; CHECK-NEXT: subs r5, r2, #1 +; CHECK-NEXT: eor r6, r2, #1 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: orr.w r6, r6, r3 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r1, ne +; CHECK-NEXT: csel r6, r0, r6, eq +; CHECK-NEXT: cmp r2, #1 +; CHECK-NEXT: csel r5, r2, r10, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r2, r10, mi +; CHECK-NEXT: csel r3, r3, r4, mi +; CHECK-NEXT: csel r2, r5, r2, eq +; CHECK-NEXT: rsbs r5, r2, #0 +; CHECK-NEXT: sbcs.w r5, r4, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: csel r0, r6, r0, eq +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r3, r11, r7, ne +; CHECK-NEXT: cmp.w r9, #0 +; CHECK-NEXT: csel r3, r11, r3, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r7, r1, r4, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r7, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r8 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x double> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>) + %spec.store.select7 = call <2 x i128>
@llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer) + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @stest_f32i64_mm(<2 x float> %x) { +; CHECK-LABEL: stest_f32i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #28 +; CHECK-NEXT: sub sp, #28 +; CHECK-NEXT: vmov r4, r0, d0 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: mov r11, r0 +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: csel r1, r3, r0, mi +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r0, r2, r0, asr #31 +; CHECK-NEXT: mvn r10, #-2147483648 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: cmp r6, r10 +; CHECK-NEXT: mov r5, r3 +; CHECK-NEXT: add.w r3, r0, #1 +; CHECK-NEXT: csel r0, r6, r10, lo +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r6, r10, mi +; CHECK-NEXT: orrs r2, r5 +; CHECK-NEXT: str r2, [sp, #16] @ 4-byte Spill +; CHECK-NEXT: csel r8, r0, r7, eq +; CHECK-NEXT: mov.w r2, #-2147483648 +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r0, r8, r2, gt +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill +; CHECK-NEXT: csel r1, r8, r2, hi +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov.w r9, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill +; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: cmp r1, r10 +; CHECK-NEXT: mov lr, r0 +; CHECK-NEXT: csel r7, r1, r10, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: csel r4, r1, r10, mi +; CHECK-NEXT: orrs.w r3, r2, r0 +; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill +; CHECK-NEXT: csel r7, r7, r4, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r4, r0, r9, mi +; CHECK-NEXT: mov.w r3, #-2147483648 +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r9, r7, r3, gt +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r12, r7, r3, hi +; CHECK-NEXT: mov r3, r0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r3, r2, r3, asr #31 +; CHECK-NEXT: and.w r2, r3, r4 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: adds r2, #1 +; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: csel r12, r12, r9, eq +; CHECK-NEXT: cmp r6, r10 +; CHECK-NEXT: csel r6, r11, r3, lo +; CHECK-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; CHECK-NEXT: csel r6, r11, r6, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r5, r11, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: ldr r2, [sp, #20] @ 4-byte Reload +; CHECK-NEXT: csel r6, r5, r8, hi +; CHECK-NEXT: csel r6, r5, r6, eq +; CHECK-NEXT: cmp.w r2, #-1 +; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; CHECK-NEXT: csel r5, r5, r8, gt +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp r1, r10 +; CHECK-NEXT: csel r1, lr, r3, lo +; CHECK-NEXT: csel r1, lr, r1, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r0, lr, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r1, r0, r8, hi +; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: csel r1, r0, r1, eq +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r0, r0, r8, gt +; CHECK-NEXT: 
cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: ldr r0, [sp, #24] @ 4-byte Reload +; CHECK-NEXT: vmov q0[3], q0[1], r12, r0 +; CHECK-NEXT: add sp, #28 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x float> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>) + %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>) + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utest_f32i64_mm(<2 x float> %x) { +; CHECK-LABEL: utest_f32i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vmov r5, r0, d0 +; CHECK-NEXT: bl __fixunssfti +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: eor r1, r2, #1 +; CHECK-NEXT: orr.w r7, r1, r3 +; CHECK-NEXT: subs r1, r2, #1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r4, #1 +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: movs r6, #0 +; CHECK-NEXT: bl __fixunssfti +; CHECK-NEXT: eor r5, r2, #1 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbcs r2, r3, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: orr.w r5, r5, r3 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r2, r8, r4, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptoui <2 x float> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>) + %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @ustest_f32i64_mm(<2 x float> %x) { +; CHECK-LABEL: ustest_f32i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vmov r9, r0, d0 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs r6, r2, #1 +; CHECK-NEXT: eor r7, r2, #1 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: orr.w r7, r7, r3 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: mov.w r11, #1 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r8, r1, r7, ne +; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: cmp.w r8, #0 +; CHECK-NEXT: csel r1, r0, r8, ne +; CHECK-NEXT: csel r1, r0, r1, eq +; CHECK-NEXT: cmp r2, #1 +; CHECK-NEXT: csel r7, r2, r11, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r2, r11, mi +; CHECK-NEXT: csel r3, r3, r5, mi +; CHECK-NEXT: csel r2, r7, r2, eq +; CHECK-NEXT: rsbs r7, r2, #0 +; CHECK-NEXT: sbcs.w r7, r5, r3 +; CHECK-NEXT: mov.w r7, #0 +;
CHECK-NEXT: it lt +; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: orrs.w r10, r2, r3 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: str r0, [sp] @ 4-byte Spill +; CHECK-NEXT: mov r0, r9 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs r6, r2, #1 +; CHECK-NEXT: eor r4, r2, #1 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: orr.w r4, r4, r3 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r6, r0, r1, ne +; CHECK-NEXT: csel r6, r0, r6, eq +; CHECK-NEXT: cmp r2, #1 +; CHECK-NEXT: csel r4, r2, r11, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r2, r11, mi +; CHECK-NEXT: csel r3, r3, r5, mi +; CHECK-NEXT: csel r2, r4, r2, eq +; CHECK-NEXT: rsbs r4, r2, #0 +; CHECK-NEXT: sbcs.w r4, r5, r3 +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: csel r0, r6, r0, eq +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r3, r8, r7, ne +; CHECK-NEXT: cmp.w r10, #0 +; CHECK-NEXT: csel r3, r8, r3, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r1, r5, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload +; CHECK-NEXT: csel r1, r1, r7, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x float> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>) + %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer) + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @stest_f16i64_mm(<2 x half> %x) { +; CHECK-LABEL: stest_f16i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .pad #24 +; CHECK-NEXT: sub sp, #24 +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: mov r10, r0 +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: csel r1, r3, r0, mi +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r0, r2, r0, asr #31 +; CHECK-NEXT: mvn r11, #-2147483648 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: cmp r6, r11 +; CHECK-NEXT: mov r5, r3 +; CHECK-NEXT: add.w r3, r0, #1 +; CHECK-NEXT: csel r0, r6, r11, lo +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r6, r11, mi +; CHECK-NEXT: orrs r2, r5 +; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill +; CHECK-NEXT: csel r8, r0, r7, eq +; CHECK-NEXT: mov.w r2, #-2147483648 +; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r0, r8, r2, gt +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill +; CHECK-NEXT: csel r1, r8, r2, hi +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov.w r9, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill +; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill +;
CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixhfti +; CHECK-NEXT: cmp r1, r11 +; CHECK-NEXT: mov lr, r0 +; CHECK-NEXT: csel r7, r1, r11, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: csel r4, r1, r11, mi +; CHECK-NEXT: orrs r3, r2 +; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: csel r7, r7, r4, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r4, r0, r9, mi +; CHECK-NEXT: mov.w r3, #-2147483648 +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r9, r7, r3, gt +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r12, r7, r3, hi +; CHECK-NEXT: mov r3, r0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: it ne +; CHECK-NEXT: andne.w r3, r2, r3, asr #31 +; CHECK-NEXT: and.w r2, r3, r4 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: adds r2, #1 +; CHECK-NEXT: str r2, [sp] @ 4-byte Spill +; CHECK-NEXT: csel r12, r12, r9, eq +; CHECK-NEXT: cmp r6, r11 +; CHECK-NEXT: csel r6, r10, r3, lo +; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; CHECK-NEXT: csel r6, r10, r6, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r5, r10, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp.w r8, #-2147483648 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; CHECK-NEXT: csel r6, r5, r8, hi +; CHECK-NEXT: csel r6, r5, r6, eq +; CHECK-NEXT: cmp.w r2, #-1 +; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; CHECK-NEXT: csel r5, r5, r8, gt +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: csel r5, r6, r5, eq +; CHECK-NEXT: cmp r1, r11 +; CHECK-NEXT: csel r1, lr, r3, lo +; CHECK-NEXT: csel r1, lr, r1, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csel r0, lr, r3, mi +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: cmp.w r7, #-2147483648 +; CHECK-NEXT: csel r1, r0, r8, hi +; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload +; CHECK-NEXT: csel r1, r0, r1, eq +; CHECK-NEXT: cmp.w r4, #-1 +; CHECK-NEXT: csel r0, r0, r8, gt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r1, r0, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload +; CHECK-NEXT: vmov q0[3], q0[1], r12, r0 +; CHECK-NEXT: add sp, #24 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +entry: + %conv = fptosi <2 x half> %x to <2 x i128> + %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>) + %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>) + %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> + ret <2 x i64> %conv6 +} + +define arm_aapcs_vfpcc <2 x i64> @utesth_f16i64_mm(<2 x half> %x) { +; CHECK-LABEL: utesth_f16i64_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixunshfti +; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: eor r1, r2, #1 +; CHECK-NEXT: orr.w r6, r1, r3 +; CHECK-NEXT: subs r1, r2, #1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: it lo +; CHECK-NEXT: movlo r7, #1 +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r9, r0, r6, ne +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: bl __fixunshfti +;
+
+define arm_aapcs_vfpcc <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
+; CHECK-LABEL: utesth_f16i64_mm:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT: .pad #4
+; CHECK-NEXT: sub sp, #4
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: bl __fixunshfti
+; CHECK-NEXT: mov r8, r1
+; CHECK-NEXT: eor r1, r2, #1
+; CHECK-NEXT: orr.w r6, r1, r3
+; CHECK-NEXT: subs r1, r2, #1
+; CHECK-NEXT: sbcs r1, r3, #0
+; CHECK-NEXT: mov.w r7, #0
+; CHECK-NEXT: it lo
+; CHECK-NEXT: movlo r7, #1
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r0, r0, r7, ne
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r9, r0, r6, ne
+; CHECK-NEXT: vmov.u16 r0, q4[0]
+; CHECK-NEXT: movs r5, #0
+; CHECK-NEXT: bl __fixunshfti
+; CHECK-NEXT: eor r4, r2, #1
+; CHECK-NEXT: subs r2, #1
+; CHECK-NEXT: sbcs r2, r3, #0
+; CHECK-NEXT: it lo
+; CHECK-NEXT: movlo r5, #1
+; CHECK-NEXT: cmp r5, #0
+; CHECK-NEXT: orr.w r4, r4, r3
+; CHECK-NEXT: csel r0, r0, r5, ne
+; CHECK-NEXT: cmp r4, #0
+; CHECK-NEXT: csel r0, r0, r4, ne
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r2, r8, r7, ne
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r2, r2, r6, ne
+; CHECK-NEXT: cmp r5, #0
+; CHECK-NEXT: csel r1, r1, r5, ne
+; CHECK-NEXT: cmp r4, #0
+; CHECK-NEXT: csel r1, r1, r4, ne
+; CHECK-NEXT: vmov q0[2], q0[0], r0, r9
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r2
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc}
+entry:
+ %conv = fptoui <2 x half> %x to <2 x i128>
+ %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551615, i128 18446744073709551615>)
+ %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
+ ret <2 x i64> %conv6
+}
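+
+; Editorial note, not autogenerated: utesth-style functions start from fptoui,
+; so only an upper bound is needed; a single llvm.umin against UINT64_MAX
+; suffices. Scalar sketch (hypothetical name @utest_sketch, illustrative only):
+;
+;   declare i128 @llvm.umin.i128(i128, i128)
+;
+;   define i64 @utest_sketch(i128 %v) {
+;     %m = call i128 @llvm.umin.i128(i128 %v, i128 18446744073709551615)
+;     %r = trunc i128 %m to i64
+;     ret i64 %r
+;   }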
+
+define arm_aapcs_vfpcc <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
+; CHECK-LABEL: ustest_f16i64_mm:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT: .pad #4
+; CHECK-NEXT: sub sp, #4
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: bl __fixhfti
+; CHECK-NEXT: subs r6, r2, #1
+; CHECK-NEXT: eor r7, r2, #1
+; CHECK-NEXT: sbcs r6, r3, #0
+; CHECK-NEXT: orr.w r7, r7, r3
+; CHECK-NEXT: mov.w r6, #0
+; CHECK-NEXT: mov.w r10, #1
+; CHECK-NEXT: it lt
+; CHECK-NEXT: movlt r6, #1
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r0, r0, r6, ne
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r0, r0, r7, ne
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r1, r1, r6, ne
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r11, r1, r7, ne
+; CHECK-NEXT: movs r4, #0
+; CHECK-NEXT: cmp.w r11, #0
+; CHECK-NEXT: csel r1, r0, r11, ne
+; CHECK-NEXT: csel r1, r0, r1, eq
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: csel r7, r2, r10, lo
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: csel r2, r2, r10, mi
+; CHECK-NEXT: csel r3, r3, r4, mi
+; CHECK-NEXT: csel r2, r7, r2, eq
+; CHECK-NEXT: rsbs r7, r2, #0
+; CHECK-NEXT: sbcs.w r7, r4, r3
+; CHECK-NEXT: mov.w r7, #0
+; CHECK-NEXT: it lt
+; CHECK-NEXT: movlt r7, #1
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r0, r0, r7, ne
+; CHECK-NEXT: orrs.w r9, r2, r3
+; CHECK-NEXT: csel r8, r1, r0, eq
+; CHECK-NEXT: vmov.u16 r0, q4[0]
+; CHECK-NEXT: bl __fixhfti
+; CHECK-NEXT: subs r5, r2, #1
+; CHECK-NEXT: eor r6, r2, #1
+; CHECK-NEXT: sbcs r5, r3, #0
+; CHECK-NEXT: orr.w r6, r6, r3
+; CHECK-NEXT: mov.w r5, #0
+; CHECK-NEXT: it lt
+; CHECK-NEXT: movlt r5, #1
+; CHECK-NEXT: cmp r5, #0
+; CHECK-NEXT: csel r0, r0, r5, ne
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r0, r0, r6, ne
+; CHECK-NEXT: cmp r5, #0
+; CHECK-NEXT: csel r1, r1, r5, ne
+; CHECK-NEXT: cmp r6, #0
+; CHECK-NEXT: csel r1, r1, r6, ne
+; CHECK-NEXT: cmp r1, #0
+; CHECK-NEXT: csel r6, r0, r1, ne
+; CHECK-NEXT: csel r6, r0, r6, eq
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: csel r5, r2, r10, lo
+; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: csel r2, r2, r10, mi
+; CHECK-NEXT: csel r3, r3, r4, mi
+; CHECK-NEXT: csel r2, r5, r2, eq
+; CHECK-NEXT: rsbs r5, r2, #0
+; CHECK-NEXT: sbcs.w r5, r4, r3
+; CHECK-NEXT: it lt
+; CHECK-NEXT: movlt r4, #1
+; CHECK-NEXT: cmp r4, #0
+; CHECK-NEXT: csel r0, r0, r4, ne
+; CHECK-NEXT: orrs r2, r3
+; CHECK-NEXT: csel r0, r6, r0, eq
+; CHECK-NEXT: cmp r7, #0
+; CHECK-NEXT: csel r3, r11, r7, ne
+; CHECK-NEXT: cmp.w r9, #0
+; CHECK-NEXT: csel r3, r11, r3, eq
+; CHECK-NEXT: cmp r4, #0
+; CHECK-NEXT: csel r7, r1, r4, ne
+; CHECK-NEXT: cmp r2, #0
+; CHECK-NEXT: csel r1, r1, r7, eq
+; CHECK-NEXT: vmov q0[2], q0[0], r0, r8
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r3
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+entry:
+ %conv = fptosi <2 x half> %x to <2 x i128>
+ %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551615, i128 18446744073709551615>)
+ %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
+ %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
+ ret <2 x i64> %conv6
+}
+
+declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
+declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>)
+declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>)
+declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>)
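+
+; Editorial note, not autogenerated: after a codegen change, the CHECK lines
+; can be regenerated with the script named in the file header, e.g. (the llc
+; path here is an assumption about your build tree):
+;
+;   llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
+;     llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll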