
Conversation

@ylzsx (Contributor) commented Sep 12, 2025

No description provided.

@llvmbot (Member) commented Sep 12, 2025

@llvm/pr-subscribers-backend-loongarch

Author: Zhaoxin Yang (ylzsx)

Changes

Patch is 20.07 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/158176.diff

8 Files Affected:

  • (added) llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll (+68)
  • (added) llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll (+72)
  • (added) llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll (+55)
  • (added) llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll (+48)
  • (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll (+68)
  • (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll (+72)
  • (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll (+55)
  • (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll (+48)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
new file mode 100644
index 0000000000000..0b31f070db53a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vsadd_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vsadd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvadd.b $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvslt.b $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvslti.b $xr1, $xr1, 0
+; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvsrai.b $xr1, $xr2, 7
+; CHECK-NEXT:    xvbitrevi.b $xr1, $xr1, 7
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %ret
+}
+
+define <16 x i16> @vsadd_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vsadd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvadd.h $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvslt.h $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvslti.h $xr1, $xr1, 0
+; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvsrai.h $xr1, $xr2, 15
+; CHECK-NEXT:    xvbitrevi.h $xr1, $xr1, 15
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %ret
+}
+
+define <8 x i32> @vsadd_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vsadd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvadd.w $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvslt.w $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvslti.w $xr1, $xr1, 0
+; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvsrai.w $xr1, $xr2, 31
+; CHECK-NEXT:    xvbitrevi.w $xr1, $xr1, 31
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %ret
+}
+
+define <4 x i64> @vsadd_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vsadd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvadd.d $xr2, $xr0, $xr1
+; CHECK-NEXT:    xvslt.d $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvslti.d $xr1, $xr1, 0
+; CHECK-NEXT:    xvxor.v $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvsrai.d $xr1, $xr2, 63
+; CHECK-NEXT:    xvbitrevi.d $xr1, $xr1, 63
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
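
Note on the expected code above: no dedicated saturating instruction is selected here; the checks capture the generic legalization of sadd.sat (as pre-commit tests, they presumably record codegen before a planned LoongArch-specific improvement). The xvadd/xvslt/xvslti/xvxor/xvsrai/xvbitrevi/xvbitsel sequence is the standard signed-saturation idiom. A minimal per-lane scalar sketch in LLVM IR, illustrative only and not part of the patch (the function name is made up):

; sadd.sat on one i8 lane: overflow happened iff exactly one of
; "b < 0" and "sum < a" holds; on overflow the result is 127 or -128,
; chosen by the wrapped sum's sign bit.
define i8 @sadd_sat_lane(i8 %a, i8 %b) {
  %sum = add i8 %a, %b                     ; xvadd.b
  %wrapped = icmp slt i8 %sum, %a          ; xvslt.b
  %b_neg = icmp slt i8 %b, 0               ; xvslti.b
  %ovf = xor i1 %b_neg, %wrapped           ; xvxor.v
  %sign = ashr i8 %sum, 7                  ; xvsrai.b: 0 or -1
  %sat = xor i8 %sign, -128                ; xvbitrevi.b: flip bit 7 -> 127 or -128
  %res = select i1 %ovf, i8 %sat, i8 %sum  ; xvbitsel.v
  ret i8 %res
}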
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
new file mode 100644
index 0000000000000..428b5518fafa1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ssub-sat.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vssub_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vssub_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, 0
+; CHECK-NEXT:    xvslt.b $xr2, $xr2, $xr1
+; CHECK-NEXT:    xvsub.b $xr1, $xr0, $xr1
+; CHECK-NEXT:    xvslt.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvsrai.b $xr2, $xr1, 7
+; CHECK-NEXT:    xvbitrevi.b $xr2, $xr2, 7
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %ret
+}
+
+define <16 x i16> @vssub_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vssub_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, 0
+; CHECK-NEXT:    xvslt.h $xr2, $xr2, $xr1
+; CHECK-NEXT:    xvsub.h $xr1, $xr0, $xr1
+; CHECK-NEXT:    xvslt.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvsrai.h $xr2, $xr1, 15
+; CHECK-NEXT:    xvbitrevi.h $xr2, $xr2, 15
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %ret
+}
+
+define <8 x i32> @vssub_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vssub_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, 0
+; CHECK-NEXT:    xvslt.w $xr2, $xr2, $xr1
+; CHECK-NEXT:    xvsub.w $xr1, $xr0, $xr1
+; CHECK-NEXT:    xvslt.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvsrai.w $xr2, $xr1, 31
+; CHECK-NEXT:    xvbitrevi.w $xr2, $xr2, 31
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %ret
+}
+
+define <4 x i64> @vssub_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vssub_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, 0
+; CHECK-NEXT:    xvslt.d $xr2, $xr2, $xr1
+; CHECK-NEXT:    xvsub.d $xr1, $xr0, $xr1
+; CHECK-NEXT:    xvslt.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvxor.v $xr0, $xr2, $xr0
+; CHECK-NEXT:    xvsrai.d $xr2, $xr1, 63
+; CHECK-NEXT:    xvbitrevi.d $xr2, $xr2, 63
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr1, $xr2, $xr0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
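
The ssub.sat lowering mirrors the sadd.sat sketch above, with the overflow test adapted for subtraction: overflow occurred iff exactly one of "b > 0" (computed against the zero vector from xvrepli.b) and "diff < a" holds. A per-lane sketch under the same caveats (illustrative, made-up name):

define i8 @ssub_sat_lane(i8 %a, i8 %b) {
  %diff = sub i8 %a, %b                     ; xvsub.b
  %b_pos = icmp sgt i8 %b, 0                ; xvslt.b against the zero vector
  %wrapped = icmp slt i8 %diff, %a          ; xvslt.b
  %ovf = xor i1 %b_pos, %wrapped            ; xvxor.v
  %sign = ashr i8 %diff, 7                  ; xvsrai.b: 0 or -1
  %sat = xor i8 %sign, -128                 ; xvbitrevi.b: 127 or -128
  %res = select i1 %ovf, i8 %sat, i8 %diff  ; xvbitsel.v
  ret i8 %res
}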
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
new file mode 100644
index 0000000000000..35c4a154e3391
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/uadd-sat.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vuadd_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vuadd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvxori.b $xr2, $xr1, 255
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %ret
+}
+
+define <16 x i16> @vuadd_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vuadd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, -1
+; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %ret
+}
+
+define <8 x i32> @vuadd_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vuadd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, -1
+; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %ret
+}
+
+define <4 x i64> @vuadd_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vuadd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvrepli.b $xr2, -1
+; CHECK-NEXT:    xvxor.v $xr2, $xr1, $xr2
+; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr2
+; CHECK-NEXT:    xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
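
The uadd.sat lowering is branch-free via the identity that a + b wraps iff a > ~b (that is, a > UMAX - b), so clamping a to umin(a, ~b) first makes the sum come out at all-ones exactly when it would have wrapped. The byte case folds ~b into the xvxori.b immediate form, while wider element types first materialize all-ones with xvrepli.b $xr2, -1, as the checks show. A per-lane sketch (illustrative, made-up name):

define i8 @uadd_sat_lane(i8 %a, i8 %b) {
  %nb = xor i8 %b, -1                      ; xvxori.b: ~b == UMAX - b
  %lt = icmp ult i8 %a, %nb
  %clamped = select i1 %lt, i8 %a, i8 %nb  ; xvmin.bu: umin(a, ~b)
  %res = add i8 %clamped, %b               ; xvadd.b: all-ones when it would wrap
  ret i8 %res
}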
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
new file mode 100644
index 0000000000000..5d010b30e1631
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/usub-sat.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @vusub_b(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: vusub_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %ret
+}
+
+define <16 x i16> @vusub_h(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: vusub_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %ret
+}
+
+define <8 x i32> @vusub_w(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: vusub_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %ret
+}
+
+define <4 x i64> @vusub_d(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: vusub_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+  %ret = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %ret
+}
+
+declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
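
usub.sat is the simplest of the four: umax(a, b) - b gives a - b when a >= b and b - b = 0 otherwise, which is exactly the saturated difference, so two vector instructions suffice. A per-lane sketch (illustrative, made-up name); the LSX hunks below show the same four expansions at 128 bits:

define i8 @usub_sat_lane(i8 %a, i8 %b) {
  %gt = icmp ugt i8 %a, %b
  %hi = select i1 %gt, i8 %a, i8 %b  ; xvmax.bu: umax(a, b)
  %res = sub i8 %hi, %b              ; xvsub.b: a - b if a > b, else 0
  ret i8 %res
}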
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
new file mode 100644
index 0000000000000..471266f7f20d5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sadd-sat.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vsadd_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vsadd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vadd.b $vr2, $vr0, $vr1
+; CHECK-NEXT:    vslt.b $vr0, $vr2, $vr0
+; CHECK-NEXT:    vslti.b $vr1, $vr1, 0
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vsrai.b $vr1, $vr2, 7
+; CHECK-NEXT:    vbitrevi.b $vr1, $vr1, 7
+; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %ret
+}
+
+define <8 x i16> @vsadd_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vsadd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vadd.h $vr2, $vr0, $vr1
+; CHECK-NEXT:    vslt.h $vr0, $vr2, $vr0
+; CHECK-NEXT:    vslti.h $vr1, $vr1, 0
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vsrai.h $vr1, $vr2, 15
+; CHECK-NEXT:    vbitrevi.h $vr1, $vr1, 15
+; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %ret
+}
+
+define <4 x i32> @vsadd_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vsadd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vadd.w $vr2, $vr0, $vr1
+; CHECK-NEXT:    vslt.w $vr0, $vr2, $vr0
+; CHECK-NEXT:    vslti.w $vr1, $vr1, 0
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vsrai.w $vr1, $vr2, 31
+; CHECK-NEXT:    vbitrevi.w $vr1, $vr1, 31
+; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %ret
+}
+
+define <2 x i64> @vsadd_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vsadd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vadd.d $vr2, $vr0, $vr1
+; CHECK-NEXT:    vslt.d $vr0, $vr2, $vr0
+; CHECK-NEXT:    vslti.d $vr1, $vr1, 0
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vsrai.d $vr1, $vr2, 63
+; CHECK-NEXT:    vbitrevi.d $vr1, $vr1, 63
+; CHECK-NEXT:    vbitsel.v $vr0, $vr2, $vr1, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll
new file mode 100644
index 0000000000000..99462521da929
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ssub-sat.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vssub_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vssub_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, 0
+; CHECK-NEXT:    vslt.b $vr2, $vr2, $vr1
+; CHECK-NEXT:    vsub.b $vr1, $vr0, $vr1
+; CHECK-NEXT:    vslt.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT:    vsrai.b $vr2, $vr1, 7
+; CHECK-NEXT:    vbitrevi.b $vr2, $vr2, 7
+; CHECK-NEXT:    vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %ret
+}
+
+define <8 x i16> @vssub_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vssub_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, 0
+; CHECK-NEXT:    vslt.h $vr2, $vr2, $vr1
+; CHECK-NEXT:    vsub.h $vr1, $vr0, $vr1
+; CHECK-NEXT:    vslt.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT:    vsrai.h $vr2, $vr1, 15
+; CHECK-NEXT:    vbitrevi.h $vr2, $vr2, 15
+; CHECK-NEXT:    vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %ret
+}
+
+define <4 x i32> @vssub_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vssub_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, 0
+; CHECK-NEXT:    vslt.w $vr2, $vr2, $vr1
+; CHECK-NEXT:    vsub.w $vr1, $vr0, $vr1
+; CHECK-NEXT:    vslt.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT:    vsrai.w $vr2, $vr1, 31
+; CHECK-NEXT:    vbitrevi.w $vr2, $vr2, 31
+; CHECK-NEXT:    vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %ret
+}
+
+define <2 x i64> @vssub_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vssub_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, 0
+; CHECK-NEXT:    vslt.d $vr2, $vr2, $vr1
+; CHECK-NEXT:    vsub.d $vr1, $vr0, $vr1
+; CHECK-NEXT:    vslt.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vxor.v $vr0, $vr2, $vr0
+; CHECK-NEXT:    vsrai.d $vr2, $vr1, 63
+; CHECK-NEXT:    vbitrevi.d $vr2, $vr2, 63
+; CHECK-NEXT:    vbitsel.v $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll
new file mode 100644
index 0000000000000..836603b462649
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/uadd-sat.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vuadd_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vuadd_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxori.b $vr2, $vr1, 255
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr2
+; CHECK-NEXT:    vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %ret
+}
+
+define <8 x i16> @vuadd_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vuadd_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, -1
+; CHECK-NEXT:    vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr2
+; CHECK-NEXT:    vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %ret
+}
+
+define <4 x i32> @vuadd_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vuadd_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, -1
+; CHECK-NEXT:    vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr2
+; CHECK-NEXT:    vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %ret
+}
+
+define <2 x i64> @vuadd_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vuadd_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vrepli.b $vr2, -1
+; CHECK-NEXT:    vxor.v $vr2, $vr1, $vr2
+; CHECK-NEXT:    vmin.du $vr0, $vr0, $vr2
+; CHECK-NEXT:    vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll
new file mode 100644
index 0000000000000..bc8882f2fc8e0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/usub-sat.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @vusub_b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vusub_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %ret
+}
+
+define <8 x i16> @vusub_h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vusub_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %ret
+}
+
+define <4 x i32> @vusub_w(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vusub_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %ret
+}
+
+define <2 x i64> @vusub_d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vusub_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+  %ret = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %ret
+}
+
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4...
[truncated]

@ylzsx ylzsx marked this pull request as draft September 12, 2025 02:47
@ylzsx ylzsx force-pushed the users/ylzsx/precommit-suadd-susub branch from 580cd1f to 7f017ae on September 12, 2025 04:01
@ylzsx ylzsx marked this pull request as ready for review September 12, 2025 04:01
@SixWeining SixWeining changed the title from "[LoongArch] Pre-commit tests for sadd/ssub/uadd/usub intrinsics" to "[LoongArch] Pre-commit tests for saturation sadd/ssub/uadd/usub intrinsics" on Sep 15, 2025
@ylzsx ylzsx merged commit 1afadbf into main Sep 16, 2025
9 checks passed
@ylzsx ylzsx deleted the users/ylzsx/precommit-suadd-susub branch September 16, 2025 02:34