diff --git a/llvm/test/CodeGen/AArch64/sve-setcc.ll b/llvm/test/CodeGen/AArch64/sve-setcc.ll
index 191e988f57143..026c0dc89ae62 100644
--- a/llvm/test/CodeGen/AArch64/sve-setcc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-setcc.ll
@@ -1,10 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ; Ensure we use the CC result of SVE compare instructions when branching.
 define void @sve_cmplt_setcc(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
-; CHECK-LABEL: @sve_cmplt_setcc
-; CHECK: cmplt p1.h, p0/z, z0.h, #0
-; CHECK-NEXT: b.eq
+; CHECK-LABEL: sve_cmplt_setcc:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmplt p1.h, p0/z, z0.h, #0
+; CHECK-NEXT:    b.eq .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %if.then
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:  .LBB0_2: // %if.end
+; CHECK-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
   %1 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)
@@ -20,9 +26,14 @@ if.end:
 
 ; Ensure we use the inverted CC result of SVE compare instructions when branching.
 define void @sve_cmplt_setcc_inverted(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
-; CHECK-LABEL: @sve_cmplt_setcc_inverted
-; CHECK: cmplt p1.h, p0/z, z0.h, #0
-; CHECK-NEXT: b.ne
+; CHECK-LABEL: sve_cmplt_setcc_inverted:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmplt p1.h, p0/z, z0.h, #0
+; CHECK-NEXT:    b.ne .LBB1_2
+; CHECK-NEXT:  // %bb.1: // %if.then
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:  .LBB1_2: // %if.end
+; CHECK-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
   %1 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)
@@ -38,9 +49,14 @@ if.end:
 
 ; Ensure we combine setcc and csel so as to not end up with an extra compare
 define void @sve_cmplt_setcc_hslo(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
-; CHECK-LABEL: @sve_cmplt_setcc_hslo
-; CHECK: cmplt p1.h, p0/z, z0.h, #0
-; CHECK-NEXT: b.hs
+; CHECK-LABEL: sve_cmplt_setcc_hslo:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmplt p1.h, p0/z, z0.h, #0
+; CHECK-NEXT:    b.hs .LBB2_2
+; CHECK-NEXT:  // %bb.1: // %if.then
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:  .LBB2_2: // %if.end
+; CHECK-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
   %1 = tail call i1 @llvm.aarch64.sve.ptest.last.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)