81 changes: 81 additions & 0 deletions llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,81 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64

define <32 x i8> @xvsadd_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvsadd_b:
; CHECK: # %bb.0:
; CHECK-NEXT: xvadd.b $xr2, $xr0, $xr1
; CHECK-NEXT: xvslt.b $xr0, $xr2, $xr0
; CHECK-NEXT: xvslti.b $xr1, $xr1, 0
; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT: xvsrai.b $xr1, $xr2, 7
; CHECK-NEXT: xvbitrevi.b $xr1, $xr1, 7
; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT: ret
%ret = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %ret
}

define <16 x i16> @xvsadd_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvsadd_h:
; CHECK: # %bb.0:
; CHECK-NEXT: xvadd.h $xr2, $xr0, $xr1
; CHECK-NEXT: xvslt.h $xr0, $xr2, $xr0
; CHECK-NEXT: xvslti.h $xr1, $xr1, 0
; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT: xvsrai.h $xr1, $xr2, 15
; CHECK-NEXT: xvbitrevi.h $xr1, $xr1, 15
; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT: ret
%ret = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %ret
}

define <8 x i32> @xvsadd_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvsadd_w:
; CHECK: # %bb.0:
; CHECK-NEXT: xvadd.w $xr2, $xr0, $xr1
; CHECK-NEXT: xvslt.w $xr0, $xr2, $xr0
; CHECK-NEXT: xvslti.w $xr1, $xr1, 0
; CHECK-NEXT: xvxor.v $xr0, $xr1, $xr0
; CHECK-NEXT: xvsrai.w $xr1, $xr2, 31
; CHECK-NEXT: xvbitrevi.w $xr1, $xr1, 31
; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
; CHECK-NEXT: ret
%ret = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
ret <8 x i32> %ret
}

define <4 x i64> @xvsadd_d(<4 x i64> %a, <4 x i64> %b) {
; LA32-LABEL: xvsadd_d:
; LA32: # %bb.0:
; LA32-NEXT: xvadd.d $xr2, $xr0, $xr1
; LA32-NEXT: xvslt.d $xr0, $xr2, $xr0
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT: xvslti.d $xr1, $xr1, 0
; LA32-NEXT: xvxor.v $xr0, $xr1, $xr0
; LA32-NEXT: xvsrai.d $xr1, $xr2, 63
; LA32-NEXT: xvxor.v $xr1, $xr1, $xr3
; LA32-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
; LA32-NEXT: ret
;
; LA64-LABEL: xvsadd_d:
; LA64: # %bb.0:
; LA64-NEXT: xvadd.d $xr2, $xr0, $xr1
; LA64-NEXT: xvslt.d $xr0, $xr2, $xr0
; LA64-NEXT: xvslti.d $xr1, $xr1, 0
; LA64-NEXT: xvxor.v $xr0, $xr1, $xr0
; LA64-NEXT: xvsrai.d $xr1, $xr2, 63
; LA64-NEXT: xvbitrevi.d $xr1, $xr1, 63
; LA64-NEXT: xvbitsel.v $xr0, $xr2, $xr1, $xr0
; LA64-NEXT: ret
%ret = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
ret <4 x i64> %ret
}

declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
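
The CHECK lines above spell out the lowering LoongArch currently gets for llvm.sadd.sat: a wrapping xvadd, an overflow mask built by XOR-ing the "sum < a" and "b < 0" comparisons, a saturation value derived from the sum's sign (arithmetic shift, then flip the sign bit with xvbitrevi), and a final xvbitsel select. A minimal scalar C sketch of one i8 lane, for orientation only (the function name is illustrative, not part of the patch):

#include <stdint.h>

/* One i8 lane of the expansion checked above (illustrative sketch).
   Assumes two's-complement wraparound on the narrowing casts. */
int8_t sadd_sat_i8(int8_t a, int8_t b) {
    int8_t sum = (int8_t)(uint8_t)(a + b);          /* xvadd.b: wrapping add */
    int overflow = (sum < a) ^ (b < 0);             /* xvslt.b + xvslti.b + xvxor.v */
    int8_t sat = (sum < 0) ? INT8_MAX : INT8_MIN;   /* xvsrai.b 7 + xvbitrevi.b 7 */
    return overflow ? sat : sum;                    /* xvbitsel.v */
}

The one structural divergence is the v4i64 case on LA32, which loads a constant (.LCPI3_0, necessarily the per-lane sign-bit splat for the two paths to agree) and XORs it in, where LA64 flips bit 63 directly with xvbitrevi.d.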
86 changes: 86 additions & 0 deletions llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
@@ -0,0 +1,86 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64

define <32 x i8> @xvssub_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvssub_b:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, 0
; CHECK-NEXT: xvslt.b $xr2, $xr2, $xr1
; CHECK-NEXT: xvsub.b $xr1, $xr0, $xr1
; CHECK-NEXT: xvslt.b $xr0, $xr1, $xr0
; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT: xvsrai.b $xr2, $xr1, 7
; CHECK-NEXT: xvbitrevi.b $xr2, $xr2, 7
; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: ret
%ret = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %ret
}

define <16 x i16> @xvssub_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvssub_h:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, 0
; CHECK-NEXT: xvslt.h $xr2, $xr2, $xr1
; CHECK-NEXT: xvsub.h $xr1, $xr0, $xr1
; CHECK-NEXT: xvslt.h $xr0, $xr1, $xr0
; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT: xvsrai.h $xr2, $xr1, 15
; CHECK-NEXT: xvbitrevi.h $xr2, $xr2, 15
; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: ret
%ret = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %ret
}

define <8 x i32> @xvssub_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvssub_w:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, 0
; CHECK-NEXT: xvslt.w $xr2, $xr2, $xr1
; CHECK-NEXT: xvsub.w $xr1, $xr0, $xr1
; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0
; CHECK-NEXT: xvxor.v $xr0, $xr2, $xr0
; CHECK-NEXT: xvsrai.w $xr2, $xr1, 31
; CHECK-NEXT: xvbitrevi.w $xr2, $xr2, 31
; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: ret
%ret = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
ret <8 x i32> %ret
}

define <4 x i64> @xvssub_d(<4 x i64> %a, <4 x i64> %b) {
; LA32-LABEL: xvssub_d:
; LA32: # %bb.0:
; LA32-NEXT: xvrepli.b $xr2, 0
; LA32-NEXT: xvslt.d $xr2, $xr2, $xr1
; LA32-NEXT: xvsub.d $xr1, $xr0, $xr1
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT: xvslt.d $xr0, $xr1, $xr0
; LA32-NEXT: xvxor.v $xr0, $xr2, $xr0
; LA32-NEXT: xvsrai.d $xr2, $xr1, 63
; LA32-NEXT: xvxor.v $xr2, $xr2, $xr3
; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; LA32-NEXT: ret
;
; LA64-LABEL: xvssub_d:
; LA64: # %bb.0:
; LA64-NEXT: xvrepli.b $xr2, 0
; LA64-NEXT: xvslt.d $xr2, $xr2, $xr1
; LA64-NEXT: xvsub.d $xr1, $xr0, $xr1
; LA64-NEXT: xvslt.d $xr0, $xr1, $xr0
; LA64-NEXT: xvxor.v $xr0, $xr2, $xr0
; LA64-NEXT: xvsrai.d $xr2, $xr1, 63
; LA64-NEXT: xvbitrevi.d $xr2, $xr2, 63
; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; LA64-NEXT: ret
%ret = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
ret <4 x i64> %ret
}

declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
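
llvm.ssub.sat follows the same shape with the second comparison flipped: the xvrepli.b zero plus xvslt computes a "b > 0" mask, the wrapping xvsub result is compared against a, and their XOR selects between the difference and the sign-dependent saturation value. One i8 lane in the same illustrative C:

#include <stdint.h>

/* One i8 lane of the ssub.sat expansion (illustrative sketch).
   Assumes two's-complement wraparound on the narrowing casts. */
int8_t ssub_sat_i8(int8_t a, int8_t b) {
    int8_t diff = (int8_t)(uint8_t)(a - b);          /* xvsub.b: wrapping sub */
    int overflow = (diff < a) ^ (b > 0);             /* xvslt.b x2 + xvxor.v */
    int8_t sat = (diff < 0) ? INT8_MAX : INT8_MIN;   /* xvsrai.b + xvbitrevi.b */
    return overflow ? sat : diff;                    /* xvbitsel.v */
}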
55 changes: 55 additions & 0 deletions llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
@@ -0,0 +1,55 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

define <32 x i8> @xvuadd_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvuadd_b:
; CHECK: # %bb.0:
; CHECK-NEXT: xvxori.b $xr2, $xr1, 255
; CHECK-NEXT: xvmin.bu $xr0, $xr0, $xr2
; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %ret
}

define <16 x i16> @xvuadd_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvuadd_h:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, -1
; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT: xvmin.hu $xr0, $xr0, $xr2
; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %ret
}

define <8 x i32> @xvuadd_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvuadd_w:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, -1
; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT: xvmin.wu $xr0, $xr0, $xr2
; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
ret <8 x i32> %ret
}

define <4 x i64> @xvuadd_d(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: xvuadd_d:
; CHECK: # %bb.0:
; CHECK-NEXT: xvrepli.b $xr2, -1
; CHECK-NEXT: xvxor.v $xr2, $xr1, $xr2
; CHECK-NEXT: xvmin.du $xr0, $xr0, $xr2
; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
ret <4 x i64> %ret
}

declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
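
The unsigned-add tests show a much cheaper pattern with no select: clamp a to ~b (the headroom left below the type maximum) with an unsigned min, then add. Only the i8 case can fold the NOT into xvxori.b's 8-bit immediate; the wider types build the all-ones vector with xvrepli.b -1 first. Per lane, in the same illustrative C:

#include <stdint.h>

/* One u8 lane of the uadd.sat expansion (illustrative sketch). */
uint8_t uadd_sat_u8(uint8_t a, uint8_t b) {
    uint8_t cap = (uint8_t)~b;          /* xvxori.b ..., 255: headroom 255 - b */
    uint8_t lhs = a < cap ? a : cap;    /* xvmin.bu: the add below cannot wrap */
    return (uint8_t)(lhs + b);          /* xvadd.b: exactly 255 on saturation */
}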
48 changes: 48 additions & 0 deletions llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
@@ -0,0 +1,48 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

define <32 x i8> @xvusub_b(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: xvusub_b:
; CHECK: # %bb.0:
; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
ret <32 x i8> %ret
}

define <16 x i16> @xvusub_h(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: xvusub_h:
; CHECK: # %bb.0:
; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
ret <16 x i16> %ret
}

define <8 x i32> @xvusub_w(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: xvusub_w:
; CHECK: # %bb.0:
; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
ret <8 x i32> %ret
}

define <4 x i64> @xvusub_d(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: xvusub_d:
; CHECK: # %bb.0:
; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%ret = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
ret <4 x i64> %ret
}

declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
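
llvm.usub.sat gets the shortest expansion of the four: max(a, b) - b, which is a - b when a >= b and 0 otherwise. Per lane, in the same illustrative C:

#include <stdint.h>

/* One u8 lane of the usub.sat expansion (illustrative sketch). */
uint8_t usub_sat_u8(uint8_t a, uint8_t b) {
    uint8_t lhs = a > b ? a : b;   /* xvmax.bu */
    return (uint8_t)(lhs - b);     /* xvsub.b: 0 when a <= b */
}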
81 changes: 81 additions & 0 deletions llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,81 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64

define <16 x i8> @vsadd_b(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vsadd_b:
; CHECK: # %bb.0:
; CHECK-NEXT: vadd.b $vr2, $vr0, $vr1
; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0
; CHECK-NEXT: vslti.b $vr1, $vr1, 0
; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vsrai.b $vr1, $vr2, 7
; CHECK-NEXT: vbitrevi.b $vr1, $vr1, 7
; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT: ret
%ret = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
ret <16 x i8> %ret
}

define <8 x i16> @vsadd_h(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vsadd_h:
; CHECK: # %bb.0:
; CHECK-NEXT: vadd.h $vr2, $vr0, $vr1
; CHECK-NEXT: vslt.h $vr0, $vr2, $vr0
; CHECK-NEXT: vslti.h $vr1, $vr1, 0
; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vsrai.h $vr1, $vr2, 15
; CHECK-NEXT: vbitrevi.h $vr1, $vr1, 15
; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT: ret
%ret = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %ret
}

define <4 x i32> @vsadd_w(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vsadd_w:
; CHECK: # %bb.0:
; CHECK-NEXT: vadd.w $vr2, $vr0, $vr1
; CHECK-NEXT: vslt.w $vr0, $vr2, $vr0
; CHECK-NEXT: vslti.w $vr1, $vr1, 0
; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0
; CHECK-NEXT: vsrai.w $vr1, $vr2, 31
; CHECK-NEXT: vbitrevi.w $vr1, $vr1, 31
; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
; CHECK-NEXT: ret
%ret = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
ret <4 x i32> %ret
}

define <2 x i64> @vsadd_d(<2 x i64> %a, <2 x i64> %b) {
; LA32-LABEL: vsadd_d:
; LA32: # %bb.0:
; LA32-NEXT: vadd.d $vr2, $vr0, $vr1
; LA32-NEXT: vslt.d $vr0, $vr2, $vr0
; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
; LA32-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI3_0)
; LA32-NEXT: vslti.d $vr1, $vr1, 0
; LA32-NEXT: vxor.v $vr0, $vr1, $vr0
; LA32-NEXT: vsrai.d $vr1, $vr2, 63
; LA32-NEXT: vxor.v $vr1, $vr1, $vr3
; LA32-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
; LA32-NEXT: ret
;
; LA64-LABEL: vsadd_d:
; LA64: # %bb.0:
; LA64-NEXT: vadd.d $vr2, $vr0, $vr1
; LA64-NEXT: vslt.d $vr0, $vr2, $vr0
; LA64-NEXT: vslti.d $vr1, $vr1, 0
; LA64-NEXT: vxor.v $vr0, $vr1, $vr0
; LA64-NEXT: vsrai.d $vr1, $vr2, 63
; LA64-NEXT: vbitrevi.d $vr1, $vr1, 63
; LA64-NEXT: vbitsel.v $vr0, $vr2, $vr1, $vr0
; LA64-NEXT: ret
%ret = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
ret <2 x i64> %ret
}

declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
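
The LSX variants exercise the same expansions on 128-bit $vr registers, including the same LA32 constant-pool path for the i64 element case, so the scalar sketches above apply unchanged per lane.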