diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
new file mode 100644
index 0000000000000..b5d5c9c15d7c8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
@@ -0,0 +1,353 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+;; In the tests below, "_l" inserts element 0 of %vb into a lane of %va,
+;; and "_h" inserts element 0 of %va into a lane of %vb.
+
+;; xvinsve0.w
+define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_l_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI0_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI0_0)
+; CHECK-NEXT:    xvshuf.w $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <8 x i32>, ptr %a
+  %vb = load <8 x i32>, ptr %b
+  %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i32> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind {
+; LA32-LABEL: xvinsve0_v8i32_l_4:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ld.w $a2, $a2, 0
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a2, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA32-NEXT:    xvpermi.q $xr2, $xr1, 2
+; LA32-NEXT:    xvst $xr2, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: xvinsve0_v8i32_l_4:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a2, 0
+; LA64-NEXT:    xvld $xr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 5
+; LA64-NEXT:    vinsgr2vr.w $vr0, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 6
+; LA64-NEXT:    vinsgr2vr.w $vr0, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 7
+; LA64-NEXT:    vinsgr2vr.w $vr0, $a1, 3
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 0
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 1
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 2
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 3
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA64-NEXT:    xvpermi.q $xr2, $xr0, 2
+; LA64-NEXT:    xvst $xr2, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %va = load <8 x i32>, ptr %a
+  %vb = load <8 x i32>, ptr %b
+  %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7>
+  store <8 x i32> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI2_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI2_0)
+; CHECK-NEXT:    xvshuf.w $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <8 x float>, ptr %a
+  %vb = load <8 x float>, ptr %b
+  %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x float> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_h_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvshuf.w $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <8 x i32>, ptr %a
+  %vb = load <8 x i32>, ptr %b
+  %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <8 x i32> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind {
+; LA32-LABEL: xvinsve0_v8i32_h_6:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a2, 0
+; LA32-NEXT:    xvpickve2gr.w $a2, $xr0, 4
+; LA32-NEXT:    ld.w $a1, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a2, 0
+; LA32-NEXT:    xvpickve2gr.w $a2, $xr0, 5
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a2, 1
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA32-NEXT:    xvpermi.q $xr2, $xr1, 2
+; LA32-NEXT:    xvst $xr2, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: xvinsve0_v8i32_h_6:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a2, 0
+; LA64-NEXT:    xvld $xr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr1, 0
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA64-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 3
+; LA64-NEXT:    xvpermi.q $xr1, $xr2, 2
+; LA64-NEXT:    xvst $xr1, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %va = load <8 x i32>, ptr %a
+  %vb = load <8 x i32>, ptr %b
+  %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 15>
+  store <8 x i32> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI5_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI5_0)
+; CHECK-NEXT:    xvshuf.w $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <8 x float>, ptr %a
+  %vb = load <8 x float>, ptr %b
+  %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <8 x float> %vc, ptr %d
+  ret void
+}
+
+;; xvinsve0.d
+define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_l_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT:    xvshuf.d $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <4 x i64>, ptr %a
+  %vb = load <4 x i64>, ptr %b
+  %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+  store <4 x i64> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind {
+; LA32-LABEL: xvinsve0_v4i64_l_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a2, 0
+; LA32-NEXT:    xvpickve2gr.w $a2, $xr0, 0
+; LA32-NEXT:    xvld $xr1, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a2, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 6
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 7
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 1
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 2
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 3
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 3
+; LA32-NEXT:    xvpermi.q $xr0, $xr2, 2
+; LA32-NEXT:    xvst $xr0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: xvinsve0_v4i64_l_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ld.d $a2, $a2, 0
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    vinsgr2vr.d $vr1, $a2, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.d $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.d $vr2, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.d $vr2, $a1, 1
+; LA64-NEXT:    xvpermi.q $xr2, $xr1, 2
+; LA64-NEXT:    xvst $xr2, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %va = load <4 x i64>, ptr %a
+  %vb = load <4 x i64>, ptr %b
+  %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+  store <4 x i64> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT:    xvshuf.d $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <4 x double>, ptr %a
+  %vb = load <4 x double>, ptr %b
+  %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+  store <4 x double> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_h_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT:    xvshuf.d $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <4 x i64>, ptr %a
+  %vb = load <4 x i64>, ptr %b
+  %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  store <4 x i64> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind {
+; LA32-LABEL: xvinsve0_v4i64_h_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    xvld $xr1, $a2, 0
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 6
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 7
+; LA32-NEXT:    vinsgr2vr.w $vr2, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 1
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 2
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr1, 3
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 3
+; LA32-NEXT:    xvpermi.q $xr0, $xr2, 2
+; LA32-NEXT:    xvst $xr0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: xvinsve0_v4i64_h_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ld.d $a1, $a1, 0
+; LA64-NEXT:    xvld $xr0, $a2, 0
+; LA64-NEXT:    vinsgr2vr.d $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.d $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.d $vr2, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.d $vr2, $a1, 1
+; LA64-NEXT:    xvpermi.q $xr2, $xr1, 2
+; LA64-NEXT:    xvst $xr2, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %va = load <4 x i64>, ptr %a
+  %vb = load <4 x i64>, ptr %b
+  %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 4, i32 5, i32 0, i32 7>
+  store <4 x i64> %vc, ptr %d
+  ret void
+}
+
+define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvld $xr1, $a2, 0
+; CHECK-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT:    xvld $xr2, $a1, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT:    xvshuf.d $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %va = load <4 x double>, ptr %a
+  %vb = load <4 x double>, ptr %b
+  %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  store <4 x double> %vc, ptr %d
+  ret void
+}