[LoongArch] Pre-commit tests for absolute difference #132898
base: main
Conversation

No description provided.
@llvm/pr-subscribers-backend-loongarch

Author: None (tangaac)

Changes

Patch is 23.56 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/132898.diff

2 Files Affected:
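For context, these are pre-commit tests: the autogenerated CHECK lines pin down the current expansion of each absolute-difference pattern (min/max/sub, or sub/neg/max for the nsw cases). A follow-up combine would presumably fold these patterns to the dedicated LSX/LASX absolute-difference instructions instead. A minimal before/after sketch for vabsd_b (the vabsd.b mnemonic is from the LoongArch SIMD ISA; this patch itself does not emit it):

; current codegen, as checked below:
;   vmin.b  $vr2, $vr0, $vr1
;   vmax.b  $vr0, $vr0, $vr1
;   vsub.b  $vr0, $vr0, $vr2
; expected once a sub(max(a,b),min(a,b)) -> abd fold is wired up:
;   vabsd.b $vr0, $vr0, $vr1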
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
new file mode 100644
index 0000000000000..42152bc7f5fcf
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+;; Mostly copied from AArch64/neon-abd.ll
+
+;
+; XVABSD_[B/H/W/D]
+;
+define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.sext = sext <32 x i8> %a to <32 x i16>
+ %b.sext = sext <32 x i8> %b to <32 x i16>
+ %sub = sub <32 x i16> %a.sext, %b.sext
+ %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+ %trunc = trunc <32 x i16> %abs to <32 x i8>
+ ret <32 x i8> %trunc
+}
+
+define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.sext = sext <16 x i16> %a to <16 x i32>
+ %b.sext = sext <16 x i16> %b to <16 x i32>
+ %sub = sub <16 x i32> %a.sext, %b.sext
+ %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+ %trunc = trunc <16 x i32> %abs to <16 x i16>
+ ret <16 x i16> %trunc
+}
+
+define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i32> %a to <8 x i64>
+ %b.sext = sext <8 x i32> %b to <8 x i64>
+ %sub = sub <8 x i64> %a.sext, %b.sext
+ %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+ %trunc = trunc <8 x i64> %abs to <8 x i32>
+ ret <8 x i32> %trunc
+}
+
+define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i64> %a to <4 x i128>
+ %b.sext = sext <4 x i64> %b to <4 x i128>
+ %sub = sub <4 x i128> %a.sext, %b.sext
+ %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+ %trunc = trunc <4 x i128> %abs to <4 x i64>
+ ret <4 x i64> %trunc
+}
+
+;
+; XVABSD_[B/H/W/D]U
+;
+
+define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_bu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.zext = zext <32 x i8> %a to <32 x i16>
+ %b.zext = zext <32 x i8> %b to <32 x i16>
+ %sub = sub <32 x i16> %a.zext, %b.zext
+ %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+ %trunc = trunc <32 x i16> %abs to <32 x i8>
+ ret <32 x i8> %trunc
+}
+
+define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_hu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.zext = zext <16 x i16> %a to <16 x i32>
+ %b.zext = zext <16 x i16> %b to <16 x i32>
+ %sub = sub <16 x i32> %a.zext, %b.zext
+ %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+ %trunc = trunc <16 x i32> %abs to <16 x i16>
+ ret <16 x i16> %trunc
+}
+
+define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_wu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i32> %a to <8 x i64>
+ %b.zext = zext <8 x i32> %b to <8 x i64>
+ %sub = sub <8 x i64> %a.zext, %b.zext
+ %abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
+ %trunc = trunc <8 x i64> %abs to <8 x i32>
+ ret <8 x i32> %trunc
+}
+
+define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_du:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i64> %a to <4 x i128>
+ %b.zext = zext <4 x i64> %b to <4 x i128>
+ %sub = sub <4 x i128> %a.zext, %b.zext
+ %abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
+ %trunc = trunc <4 x i128> %abs to <4 x i64>
+ ret <4 x i64> %trunc
+}
+
+define <32 x i8> @xvabsd_v32i8_nsw(<32 x i8> %a, <32 x i8> %b) #0 {
+; CHECK-LABEL: xvabsd_v32i8_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvneg.b $xr1, $xr0
+; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <32 x i8> %a, %b
+ %abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
+ ret <32 x i8> %abs
+}
+
+define <16 x i16> @xvabsd_v16i16_nsw(<16 x i16> %a, <16 x i16> %b) #0 {
+; CHECK-LABEL: xvabsd_v16i16_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvneg.h $xr1, $xr0
+; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <16 x i16> %a, %b
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ ret <16 x i16> %abs
+}
+
+define <8 x i32> @xvabsd_v8i32_nsw(<8 x i32> %a, <8 x i32> %b) #0 {
+; CHECK-LABEL: xvabsd_v8i32_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvneg.w $xr1, $xr0
+; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <8 x i32> %a, %b
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ ret <8 x i32> %abs
+}
+
+define <4 x i64> @xvabsd_v4i64_nsw(<4 x i64> %a, <4 x i64> %b) #0 {
+; CHECK-LABEL: xvabsd_v4i64_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvneg.d $xr1, $xr0
+; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <4 x i64> %a, %b
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ ret <4 x i64> %abs
+}
+
+define <32 x i8> @smaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: smaxmin_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.b $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
+ %b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
+ %sub = sub <32 x i8> %a, %b
+ ret <32 x i8> %sub
+}
+
+define <16 x i16> @smaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: smaxmin_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.h $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
+ %b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
+ %sub = sub <16 x i16> %a, %b
+ ret <16 x i16> %sub
+}
+
+define <8 x i32> @smaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: smaxmin_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.w $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
+ %b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
+ %sub = sub <8 x i32> %a, %b
+ ret <8 x i32> %sub
+}
+
+define <4 x i64> @smaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: smaxmin_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.d $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
+ %b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
+ %sub = sub <4 x i64> %a, %b
+ ret <4 x i64> %sub
+}
+
+define <32 x i8> @umaxmin_v32i8(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: umaxmin_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+ %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
+ %sub = sub <32 x i8> %a, %b
+ ret <32 x i8> %sub
+}
+
+define <16 x i16> @umaxmin_v16i16(<16 x i16> %0, <16 x i16> %1) {
+; CHECK-LABEL: umaxmin_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.hu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
+ %b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
+ %sub = sub <16 x i16> %a, %b
+ ret <16 x i16> %sub
+}
+
+define <8 x i32> @umaxmin_v8i32(<8 x i32> %0, <8 x i32> %1) {
+; CHECK-LABEL: umaxmin_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.wu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
+ %b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
+ %sub = sub <8 x i32> %a, %b
+ ret <8 x i32> %sub
+}
+
+define <4 x i64> @umaxmin_v4i64(<4 x i64> %0, <4 x i64> %1) {
+; CHECK-LABEL: umaxmin_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.du $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
+ %b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
+ %sub = sub <4 x i64> %a, %b
+ ret <4 x i64> %sub
+}
+
+define <32 x i8> @umaxmin_v32i8_com1(<32 x i8> %0, <32 x i8> %1) {
+; CHECK-LABEL: umaxmin_v32i8_com1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvmin.bu $xr2, $xr0, $xr1
+; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
+; CHECK-NEXT: ret
+ %a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
+ %b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
+ %sub = sub <32 x i8> %a, %b
+ ret <32 x i8> %sub
+}
+
+declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
+
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
+
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
+
+declare <4 x i128> @llvm.abs.v4i128(<4 x i128>, i1)
+
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
new file mode 100644
index 0000000000000..3ecfaed8e1505
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/absd.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+;; Mostly copied from AArch64/neon-abd.ll
+
+;
+; VABSD_[B/H/W/D]
+;
+define <16 x i8> @vabsd_b(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.sext = sext <16 x i8> %a to <16 x i16>
+ %b.sext = sext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <8 x i16> @vabsd_h(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i16> %a to <8 x i32>
+ %b.sext = sext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <4 x i32> @vabsd_w(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i32> %a to <4 x i64>
+ %b.sext = sext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.sext, %b.sext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <2 x i64> @vabsd_d(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i64> %a to <2 x i128>
+ %b.sext = sext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.sext, %b.sext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+;
+; VABSD_[B/H/W/D]U
+;
+
+define <16 x i8> @vabsd_bu(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_bu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.bu $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.zext = zext <16 x i8> %a to <16 x i16>
+ %b.zext = zext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.zext, %b.zext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <8 x i16> @vabsd_hu(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_hu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.hu $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i16> %a to <8 x i32>
+ %b.zext = zext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.zext, %b.zext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <4 x i32> @vabsd_wu(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_wu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.wu $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i32> %a to <4 x i64>
+ %b.zext = zext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.zext, %b.zext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <2 x i64> @vabsd_du(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_du:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.du $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i64> %a to <2 x i128>
+ %b.zext = zext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.zext, %b.zext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <16 x i8> @vabsd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK-LABEL: vabsd_v16i8_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsub.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vneg.b $vr1, $vr0
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @vabsd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: vabsd_v8i16_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsub.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vneg.h $vr1, $vr0
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @vabsd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: vabsd_v4i32_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsub.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vneg.w $vr1, $vr0
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @vabsd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: vabsd_v2i64_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vneg.d $vr1, $vr0
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: ret
+ %sub = sub nsw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.b $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.b $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.h $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.h $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.w $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.w $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmin.d $vr2, $vr0, $vr1
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsub.d $vr0, $vr0, $vr2
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LAB...
[truncated]
A reviewer commented on the fold list in the file header comment (context from the full, untruncated test file):

;; 1. trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b) or abdu(a,b)
;; 2. abs(sub_nsw(x, y)) -> abds(a,b)
;; 3. sub(smax(a,b),smin(a,b)) -> abds(a,b) or abdu(a,b)
;; 4. select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b) or abdu(a,b)
;; 5. sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b) or abdu(a,b)

Suggested change to line 4, making the icmp predicate explicit:

;; 4. select(icmp(a,b, slt|ult),sub(a,b),sub(b,a)) -> abds(a,b) or abdu(a,b)

A second review comment asks for the same change to line 5 ("ditto").
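For illustration, a minimal IR sketch of fold 4 with the predicate spelled out as the reviewer suggests (the function name and vector type here are my own, not taken from the patch):

; |a - b| for signed elements: matches fold 4 because the predicate is slt.
define <16 x i8> @abd_select_slt(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp slt <16 x i8> %a, %b
  %ab  = sub <16 x i8> %a, %b
  %ba  = sub <16 x i8> %b, %a
  ; a < b ? b - a : a - b
  %sel = select <16 x i1> %cmp, <16 x i8> %ba, <16 x i8> %ab
  ret <16 x i8> %sel
}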
The same reviewer also commented on the select-pattern comment later in the file:

;; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)

Suggested change, again spelling out the predicate:

;; select(icmp(a,b,slt),sub(a,b),sub(b,a)) -> abds(a,b)
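The unsigned counterpart differs only in the comparison predicate; a sketch under the same assumptions as above (names are illustrative):

; |a - b| for unsigned elements: ult steers the fold toward abdu.
define <16 x i8> @abd_select_ult(<16 x i8> %a, <16 x i8> %b) {
  %cmp = icmp ult <16 x i8> %a, %b
  %ab  = sub <16 x i8> %a, %b
  %ba  = sub <16 x i8> %b, %a
  ; a < b ? b - a : a - b, unsigned
  %sel = select <16 x i1> %cmp, <16 x i8> %ba, <16 x i8> %ab
  ret <16 x i8> %sel
}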