Conversation

@ylzsx (Contributor) commented Oct 27, 2025

No description provided.

@llvmbot (Member) commented Oct 27, 2025

@llvm/pr-subscribers-backend-loongarch

Author: Zhaoxin Yang (ylzsx)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/165213.diff

2 Files Affected:

  • (added) llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll (+308)
  • (added) llvm/test/CodeGen/LoongArch/lsx/fp-rounding.ll (+212)
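
Both files are autogenerated with llvm/utils/update_llc_test_checks.py (UTC_ARGS version 5, per the NOTE line in each file), so the CHECK lines track the current codegen exactly. If a later patch changes the lowering, the assertions can be regenerated rather than edited by hand; a minimal sketch of the invocation, assuming a built llc is on PATH (or passed via --llc-binary):

  llvm/utils/update_llc_test_checks.py \
      llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll \
      llvm/test/CodeGen/LoongArch/lsx/fp-rounding.ll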
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll b/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll
new file mode 100644
index 0000000000000..d570dba3f4658
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll
@@ -0,0 +1,308 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefix=CHECK
+
+;; ceilf
+define void @ceil_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrp.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrp.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.ceil.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; ceil
+define void @ceil_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrp.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.ceil.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; floorf
+define void @floor_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrm.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrm.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.floor.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; floor
+define void @floor_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrm.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.floor.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; truncf
+define void @trunc_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrz.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrz.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.trunc.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; trunc
+define void @trunc_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrz.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.trunc.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; roundevenf
+define void @roundeven_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrne.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrne.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; roundeven
+define void @roundeven_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrne.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+declare <8 x float> @llvm.ceil.v8f32(<8 x float>)
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>)
+declare <8 x float> @llvm.floor.v8f32(<8 x float>)
+declare <4 x double> @llvm.floor.v4f64(<4 x double>)
+declare <8 x float> @llvm.trunc.v8f32(<8 x float>)
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>)
+declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
+declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/fp-rounding.ll b/llvm/test/CodeGen/LoongArch/lsx/fp-rounding.ll
new file mode 100644
index 0000000000000..6ccf45745e7d4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/fp-rounding.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK
+
+;; ceilf
+define void @ceil_v4f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vreplvei.w $vr2, $vr0, 0
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrp.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr2, $vr0, 48
+; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %r = call <4 x float> @llvm.ceil.v4f32(<4 x float> %v0)
+  store <4 x float> %r, ptr %res
+  ret void
+}
+
+;; ceil
+define void @ceil_v2f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.d $vr1, $vr1
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %r = call <2 x double> @llvm.ceil.v2f64(<2 x double> %v0)
+  store <2 x double> %r, ptr %res
+  ret void
+}
+
+;; floorf
+define void @floor_v4f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vreplvei.w $vr2, $vr0, 0
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrm.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr2, $vr0, 48
+; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %r = call <4 x float> @llvm.floor.v4f32(<4 x float> %v0)
+  store <4 x float> %r, ptr %res
+  ret void
+}
+
+;; floor
+define void @floor_v2f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.d $vr1, $vr1
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %r = call <2 x double> @llvm.floor.v2f64(<2 x double> %v0)
+  store <2 x double> %r, ptr %res
+  ret void
+}
+
+;; truncf
+define void @trunc_v4f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vreplvei.w $vr2, $vr0, 0
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrz.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr2, $vr0, 48
+; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %r = call <4 x float> @llvm.trunc.v4f32(<4 x float> %v0)
+  store <4 x float> %r, ptr %res
+  ret void
+}
+
+;; trunc
+define void @trunc_v2f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.d $vr1, $vr1
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %r = call <2 x double> @llvm.trunc.v2f64(<2 x double> %v0)
+  store <2 x double> %r, ptr %res
+  ret void
+}
+
+;; roundevenf
+define void @roundeven_v4f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vreplvei.w $vr2, $vr0, 0
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrne.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr2, $vr0, 48
+; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %r = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %v0)
+  store <4 x float> %r, ptr %res
+  ret void
+}
+
+;; roundeven
+define void @roundeven_v2f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.d $vr1, $vr1
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %r = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %v0)
+  store <2 x double> %r, ptr %res
+  ret void
+}
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
+declare <4 x float> @llvm.floor.v4f32(<4 x float>)
+declare <2 x double> @llvm.floor.v2f64(<2 x double>)
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
+declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
+declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
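
Worth noting for reviewers: in both the LSX and LASX output above, the rounding intrinsics are fully scalarized. Each lane is extracted (vreplvei.{w,d} or xvpickve.{w,d}), rounded on its own through vfrintrp/vfrintrm/vfrintrz/vfrintrne, and reinserted with vextrins (plus xvpermi.q to rebuild the 256-bit register). Since those vfrint* instructions already round every lane of a vector register at once, a follow-up that lowers the intrinsics directly could presumably collapse each function to a few instructions. A hypothetical result for ceil_v4f32, assuming such a lowering lands (this is not output produced by this patch):

  vld        $vr0, $a1, 0
  vfrintrp.s $vr0, $vr0
  vst        $vr0, $a0, 0
  ret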

@ylzsx ylzsx merged commit 2981b5d into main Oct 31, 2025
13 of 14 checks passed
@ylzsx ylzsx deleted the users/ylzsx/precommit-fp-rounding branch October 31, 2025 05:57
DEBADRIBASAK pushed a commit to DEBADRIBASAK/llvm-project that referenced this pull request Nov 3, 2025