From 95dc265f9b4822d6f0c42e921a7583d97b564e7a Mon Sep 17 00:00:00 2001 From: David Mansell Date: Wed, 12 Nov 2025 11:44:08 +0000 Subject: [PATCH] feat: gemm: Add SME1 FP32 kernels. Change-Id: I6ed4d04e0b3e83de85a6e1718098e06de6cb8a64 Signed-off-by: David Mansell --- Android.bp | 3 + filelist.json | 3 + src/BUILD.bazel | 3 + src/CMakeLists.txt | 3 + src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 32 +- ..._interleaved_nomerge_fp32_mopa_1VLx4VL.hpp | 84 ++ .../generic.cpp | 653 ++++++++++++++ ..._interleaved_nomerge_fp32_mopa_2VLx2VL.hpp | 84 ++ .../generic.cpp | 686 +++++++++++++++ ..._interleaved_nomerge_fp32_mopa_4VLx1VL.hpp | 84 ++ .../generic.cpp | 798 ++++++++++++++++++ 11 files changed, 2432 insertions(+), 1 deletion(-) create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp create mode 100644 src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp diff --git a/Android.bp b/Android.bp index 3cacb3524f..9f659b2f6e 100644 --- a/Android.bp +++ b/Android.bp @@ -1342,6 +1342,9 @@ cc_library_static { "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp", + "src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp", + 
"src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp", + "src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp", diff --git a/filelist.json b/filelist.json index 81bd5ef9c8..01ceb9cf6b 100644 --- a/filelist.json +++ b/filelist.json @@ -1807,6 +1807,9 @@ "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp", + "src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp", + "src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp", + "src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp", "src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp", diff --git a/src/BUILD.bazel b/src/BUILD.bazel index 89e632ddd4..b85a204729 100644 --- a/src/BUILD.bazel +++ b/src/BUILD.bazel @@ -282,6 +282,9 @@ filegroup( "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp", "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp", "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp", + "core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp", + 
"core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp", + "core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp", "core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp", "core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp", "core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp", diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 3a8815d836..fe70114490 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -251,6 +251,9 @@ target_sources( core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp + core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp + core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp + core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp index 5da7161671..6622a01fb8 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2024 Arm Limited. + * Copyright (c) 2017-2025 Arm Limited. 
* * SPDX-License-Identifier: MIT * * @@ -66,6 +66,11 @@ #include "kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp" #include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp" #endif // ARM_COMPUTE_ENABLE_SME2 +#ifdef ARM_COMPUTE_ENABLE_SME +#include "kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp" +#include "kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp" +#include "kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp" +#endif // ARM_COMPUTE_ENABLE_SME #include "kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp" #include "kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp" @@ -188,6 +193,31 @@ GemmImplementation<float, float>::with_estimate( [](const GemmArgs &args) { return new GemmInterleavedNoMerge(args); } }, #endif // ARM_COMPUTE_ENABLE_SME2 +#ifdef ARM_COMPUTE_ENABLE_SME +{ + GemmMethod::GEMM_INTERLEAVED, + "sme_interleaved_nomerge_fp32_mopa_1VLx4VL", + [](const GemmArgs &args) { return args._ci->has_sme(); }, + [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>(); + return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); }, + [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme_interleaved_nomerge_fp32_mopa_1VLx4VL, float, float>(args); } +}, +{ + GemmMethod::GEMM_INTERLEAVED, + "sme_interleaved_nomerge_fp32_mopa_4VLx1VL", + [](const GemmArgs &args) { return args._ci->has_sme(); }, + [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>(); + return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); }, + [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme_interleaved_nomerge_fp32_mopa_4VLx1VL, float, float>(args); } +}, +{ + GemmMethod::GEMM_INTERLEAVED, + "sme_interleaved_nomerge_fp32_mopa_2VLx2VL", + [](const GemmArgs &args) { return args._ci->has_sme(); }, + nullptr, + [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme_interleaved_nomerge_fp32_mopa_2VLx2VL, float, float>(args); } +}, +#endif // ARM_COMPUTE_ENABLE_SME #ifdef ARM_COMPUTE_ENABLE_BF16 GemmImplementation<float, float>::with_estimate( GemmMethod::GEMM_INTERLEAVED, diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp 
b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp new file mode 100644 index 0000000000..43b36068c0 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2025 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#pragma once + +#ifdef ARM_COMPUTE_ENABLE_SME + + +#include "../std_transforms_sme.hpp" + +namespace arm_gemm +{ + +// Implementations +void sme_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + +class cls_sme_interleaved_nomerge_fp32_mopa_1VLx4VL +{ +public: + typedef float lhs_operand_type; + typedef float rhs_operand_type; + typedef float result_type; + + typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + + /* Kernel blocking parameters */ + static unsigned int out_height() + { + return sme::get_vector_length<float>() * 1; + } + + static unsigned int out_width() + { + return sme::get_vector_length<float>() * 4; + } + + static constexpr unsigned int k_unroll() + { + return 1; + } + + static constexpr bool supports_bias() + { + return true; + } + + static constexpr bool is_sme() + { + return true; + } + + // Default to the generic kernel + kern_type kernel = sme_interleaved_nomerge_fp32_mopa_1VLx4VL; + + StdTransformsSME<lhs_operand_type, rhs_operand_type, result_type, 1, 4, 1> transforms = {}; + + cls_sme_interleaved_nomerge_fp32_mopa_1VLx4VL(const CPUInfo *) + { + } +}; + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp new file mode 100644 index 0000000000..c4d366c8bb --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp @@ -0,0 +1,653 @@ +/* + * Copyright (c) 2025 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifdef ARM_COMPUTE_ENABLE_SME + +#include "arm_gemm.hpp" + + +#include "../../asmlib.hpp" +#include "../../utils.hpp" + +namespace arm_gemm { + +void sme_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer) +{ + struct KernelArgs + { + KernelArgs( + const float *const A, + const float *const B, + float *const C, const int ldc, + const int M, const int N, const int K, + const float *const bias, + const Activation act, + bool accumulate, + float *const accumulator_buffer + ) : A(A), + B(B), kstride_bytes(K * sizeof(float)), + C(C), ldcb(ldc * sizeof(float)), + M(M), N(N), K(K), + min(-std::numeric_limits<float>::infinity()), + max(std::numeric_limits<float>::infinity()), + bias(bias), + accumulator_buffer(accumulator_buffer), + flags(0x0) + { + if (accumulate) + { + flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER + } + if (C == nullptr) + { + flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER + } + if (act.type == Activation::Type::None) + { + flags |= 1 << 2; // SKIP_ACTIVATION + } + + // Initialise the activation values + switch (act.type) + { + default: + case Activation::Type::None: + break; + case Activation::Type::BoundedReLU: + this->max = static_cast<float>(act.param1); + /* fall through */ + case Activation::Type::ReLU: + this->min = static_cast<float>(0); + break; + } + } + + const float *const A; + const float *const B; + const long kstride_bytes; + float *const C; + const long ldcb; + const long M, N, K; + float min = -std::numeric_limits<float>::infinity(); + float max = std::numeric_limits<float>::infinity(); + + const float *const bias; + + + float *const accumulator_buffer; + uint64_t flags; + }; + + // Construct arguments for this kernel + KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer); + + __asm__ __volatile__( + "ldr x5, [%x[args], %[offsetof_flags]]\n" + ".inst 
0xd503477f // SMSTART ZA\n" + "ptrue p4.b\n" + "ldr x6, [%x[args], %[offsetof_accumulator_buffer]]\n" + "ldr x7, [%x[args], %[offsetof_accumulator_buffer]]\n" + "tbz x5, #0, 2f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "1:" // Initial accumulator load from buffer: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f10c0 // ld1w { za0h.s[x12] }, p4/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f12c4 // ld1w { za1h.s[x12] }, p4/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe09f12a8 // ld1w { za2h.s[x12] }, p4/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f128c // ld1w { za3h.s[x12] }, p4/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09910c1 // ld1w { za0h.s[x12, #1] }, p4/Z, [x6, x25, LSL #2]\n" + ".inst 0xe09912c5 // ld1w { za1h.s[x12, #1] }, p4/Z, [x22, x25, LSL #2]\n" + ".inst 0xe09912a9 // ld1w { za2h.s[x12, #1] }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0xe099128d // ld1w { za3h.s[x12, #1] }, p4/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09810c2 // ld1w { za0h.s[x12, #2] }, p4/Z, [x6, x24, LSL #2]\n" + ".inst 0xe09812c6 // ld1w { za1h.s[x12, #2] }, p4/Z, [x22, x24, LSL #2]\n" + ".inst 0xe09812aa // ld1w { za2h.s[x12, #2] }, p4/Z, [x21, x24, LSL #2]\n" + ".inst 0xe098128e // ld1w { za3h.s[x12, #2] }, p4/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09710c3 // ld1w { za0h.s[x12, #3] }, p4/Z, [x6, x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe09712c7 // ld1w { za1h.s[x12, #3] }, p4/Z, [x22, x23, LSL #2]\n" + ".inst 0xe09712ab // ld1w { za2h.s[x12, #3] }, p4/Z, [x21, x23, LSL #2]\n" + ".inst 0xe097128f // ld1w { za3h.s[x12, #3] }, p4/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 1b\n" + "2:" // Initial accumulator load from buffer: End + "ldr x8, [%x[args], %[offsetof_K]]\n" + "mov x17, #0x0\n" + "mov x16, #0x0\n" + "ldr w15, [%x[args], %[offsetof_M]]\n" + "ldr w14, [%x[args], %[offsetof_N]]\n" + "ldr x13, [%x[args], %[offsetof_A]]\n" + "3:" // M loop + "ldr x11, [%x[args], 
%[offsetof_B]]\n" + "4:" // N loop + "mov x20, x16\n" + "mov x10, x13\n" + "whilelt p3.s, x20, x14\n" + "incw x20\n" + "whilelt p2.s, x20, x14\n" + "incw x20\n" + "whilelt p1.s, x20, x14\n" + "incw x20\n" + "whilelt p0.s, x20, x14\n" + "tbnz x5, #0, 5f\n" + "ldr x20, [%x[args], %[offsetof_bias]]\n" + ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n" + "cbz x20, 6f\n" + "add x20, x20, x16, LSL #2\n" + "fmov z20.s, #1.0\n" + "ld1w { z19.s }, p3/Z, [x20]\n" + "ld1w { z18.s }, p2/Z, [x20, #1, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [x20, #2, MUL VL]\n" + "ld1w { z16.s }, p0/Z, [x20, #3, MUL VL]\n" + ".inst 0x80939280 // fmopa za0.s, p4/M, p4/M, z20.s, z19.s\n" + ".inst 0x80929281 // fmopa za1.s, p4/M, p4/M, z20.s, z18.s\n" + ".inst 0x80919282 // fmopa za2.s, p4/M, p4/M, z20.s, z17.s\n" + ".inst 0x80909283 // fmopa za3.s, p4/M, p4/M, z20.s, z16.s\n" + "5:" // Prepare accumulators: Test for last block + "mov x20, x16\n" + "mov x21, x17\n" + "incw x20, ALL, MUL #4\n" + "incw x21\n" + "cmp x20, x14\n" + "mov x20, x5\n" + "csel x21, x17, x21, LT\n" + "bfm x5, XZR, #0x0, #0x0 // bfc x5, #0x0, #0x1\n" + "cmp x21, x15\n" + "csel x5, x20, x5, LT\n" + "6:" // Prepare accumulators: End + "lsr x23, x8, #0x2\n" + "and x22, x8, #0x3\n" + "cbz x23, 9f\n" + "addvl x21, x11, #8\n" + "addvl x20, x11, #12\n" + "ld1w { z3.s }, p4/Z, [x10]\n" + "subs x23, x23, #0x1\n" + "ld1w { z2.s }, p4/Z, [x10, #1, MUL VL]\n" + "ld1w { z1.s }, p4/Z, [x10, #2, MUL VL]\n" + "ld1w { z0.s }, p4/Z, [x10, #3, MUL VL]\n" + "addvl x10, x10, #4\n" + "ld1w { z31.s }, p4/Z, [x11]\n" + "ld1w { z30.s }, p4/Z, [x11, #1, MUL VL]\n" + "ld1w { z29.s }, p4/Z, [x11, #2, MUL VL]\n" + "ld1w { z28.s }, p4/Z, [x11, #3, MUL VL]\n" + "ld1w { z27.s }, p4/Z, [x11, #4, MUL VL]\n" + "ld1w { z26.s }, p4/Z, [x11, #5, MUL VL]\n" + "ld1w { z25.s }, p4/Z, [x11, #6, MUL VL]\n" + "ld1w { z24.s }, p4/Z, [x11, #7, MUL VL]\n" + "addvl x11, x11, #16\n" + "ld1w { z23.s }, p4/Z, [x21]\n" + "ld1w { z22.s }, p4/Z, [x21, 
#1, MUL VL]\n" + "ld1w { z21.s }, p4/Z, [x21, #2, MUL VL]\n" + "ld1w { z20.s }, p4/Z, [x21, #3, MUL VL]\n" + "ld1w { z19.s }, p4/Z, [x20]\n" + "ld1w { z18.s }, p4/Z, [x20, #1, MUL VL]\n" + "ld1w { z17.s }, p4/Z, [x20, #2, MUL VL]\n" + "ld1w { z16.s }, p4/Z, [x20, #3, MUL VL]\n" + "ble 8f\n" + "7:" // K loop + ".inst 0x809f9060 // fmopa za0.s, p4/M, p4/M, z3.s, z31.s\n" + "addvl x21, x11, #8\n" + "addvl x20, x11, #12\n" + "ld1w { z31.s }, p4/Z, [x11]\n" + ".inst 0x809e9061 // fmopa za1.s, p4/M, p4/M, z3.s, z30.s\n" + "subs x23, x23, #0x1\n" + "ld1w { z30.s }, p4/Z, [x11, #1, MUL VL]\n" + ".inst 0x809d9062 // fmopa za2.s, p4/M, p4/M, z3.s, z29.s\n" + "ld1w { z29.s }, p4/Z, [x11, #2, MUL VL]\n" + ".inst 0x809c9063 // fmopa za3.s, p4/M, p4/M, z3.s, z28.s\n" + "ld1w { z3.s }, p4/Z, [x10]\n" + ".inst 0x809b9040 // fmopa za0.s, p4/M, p4/M, z2.s, z27.s\n" + "ld1w { z28.s }, p4/Z, [x11, #3, MUL VL]\n" + ".inst 0x809a9041 // fmopa za1.s, p4/M, p4/M, z2.s, z26.s\n" + "ld1w { z27.s }, p4/Z, [x11, #4, MUL VL]\n" + ".inst 0x80999042 // fmopa za2.s, p4/M, p4/M, z2.s, z25.s\n" + "ld1w { z26.s }, p4/Z, [x11, #5, MUL VL]\n" + ".inst 0x80989043 // fmopa za3.s, p4/M, p4/M, z2.s, z24.s\n" + "ld1w { z2.s }, p4/Z, [x10, #1, MUL VL]\n" + ".inst 0x80979020 // fmopa za0.s, p4/M, p4/M, z1.s, z23.s\n" + "ld1w { z25.s }, p4/Z, [x11, #6, MUL VL]\n" + ".inst 0x80969021 // fmopa za1.s, p4/M, p4/M, z1.s, z22.s\n" + "ld1w { z24.s }, p4/Z, [x11, #7, MUL VL]\n" + "addvl x11, x11, #16\n" + ".inst 0x80959022 // fmopa za2.s, p4/M, p4/M, z1.s, z21.s\n" + "ld1w { z23.s }, p4/Z, [x21]\n" + ".inst 0x80949023 // fmopa za3.s, p4/M, p4/M, z1.s, z20.s\n" + "ld1w { z1.s }, p4/Z, [x10, #2, MUL VL]\n" + ".inst 0x80939000 // fmopa za0.s, p4/M, p4/M, z0.s, z19.s\n" + "ld1w { z22.s }, p4/Z, [x21, #1, MUL VL]\n" + ".inst 0x80929001 // fmopa za1.s, p4/M, p4/M, z0.s, z18.s\n" + "ld1w { z21.s }, p4/Z, [x21, #2, MUL VL]\n" + ".inst 0x80919002 // fmopa za2.s, p4/M, p4/M, z0.s, z17.s\n" + "ld1w { z20.s }, p4/Z, [x21, #3, 
MUL VL]\n" + ".inst 0x80909003 // fmopa za3.s, p4/M, p4/M, z0.s, z16.s\n" + "ld1w { z0.s }, p4/Z, [x10, #3, MUL VL]\n" + "addvl x10, x10, #4\n" + "ld1w { z19.s }, p4/Z, [x20]\n" + "ld1w { z18.s }, p4/Z, [x20, #1, MUL VL]\n" + "ld1w { z17.s }, p4/Z, [x20, #2, MUL VL]\n" + "ld1w { z16.s }, p4/Z, [x20, #3, MUL VL]\n" + "bgt 7b\n" + "8:" // K loop tail + ".inst 0x809f9060 // fmopa za0.s, p4/M, p4/M, z3.s, z31.s\n" + ".inst 0x809e9061 // fmopa za1.s, p4/M, p4/M, z3.s, z30.s\n" + ".inst 0x809d9062 // fmopa za2.s, p4/M, p4/M, z3.s, z29.s\n" + ".inst 0x809c9063 // fmopa za3.s, p4/M, p4/M, z3.s, z28.s\n" + ".inst 0x809b9040 // fmopa za0.s, p4/M, p4/M, z2.s, z27.s\n" + ".inst 0x809a9041 // fmopa za1.s, p4/M, p4/M, z2.s, z26.s\n" + ".inst 0x80999042 // fmopa za2.s, p4/M, p4/M, z2.s, z25.s\n" + ".inst 0x80989043 // fmopa za3.s, p4/M, p4/M, z2.s, z24.s\n" + ".inst 0x80979020 // fmopa za0.s, p4/M, p4/M, z1.s, z23.s\n" + ".inst 0x80969021 // fmopa za1.s, p4/M, p4/M, z1.s, z22.s\n" + ".inst 0x80959022 // fmopa za2.s, p4/M, p4/M, z1.s, z21.s\n" + ".inst 0x80949023 // fmopa za3.s, p4/M, p4/M, z1.s, z20.s\n" + ".inst 0x80939000 // fmopa za0.s, p4/M, p4/M, z0.s, z19.s\n" + ".inst 0x80929001 // fmopa za1.s, p4/M, p4/M, z0.s, z18.s\n" + ".inst 0x80919002 // fmopa za2.s, p4/M, p4/M, z0.s, z17.s\n" + ".inst 0x80909003 // fmopa za3.s, p4/M, p4/M, z0.s, z16.s\n" + "9:" // K oddments + "cbz x22, 11f\n" + "10:" // K oddments: Loop + "ld1w { z20.s }, p4/Z, [x10]\n" + "subs x22, x22, #0x1\n" + "addvl x10, x10, #1\n" + "ld1w { z19.s }, p4/Z, [x11]\n" + "ld1w { z18.s }, p4/Z, [x11, #1, MUL VL]\n" + "ld1w { z17.s }, p4/Z, [x11, #2, MUL VL]\n" + "ld1w { z16.s }, p4/Z, [x11, #3, MUL VL]\n" + "addvl x11, x11, #4\n" + ".inst 0x80939280 // fmopa za0.s, p4/M, p4/M, z20.s, z19.s\n" + ".inst 0x80929281 // fmopa za1.s, p4/M, p4/M, z20.s, z18.s\n" + ".inst 0x80919282 // fmopa za2.s, p4/M, p4/M, z20.s, z17.s\n" + ".inst 0x80909283 // fmopa za3.s, p4/M, p4/M, z20.s, z16.s\n" + "bgt 10b\n" + "11:" // K 
oddments: End + "tbz x5, #1, 15f\n" + "tbz x5, #0, 13f\n" + "mov x12, #0x0\n" + "cntw x9\n" + "cntw x28\n" + "cntw x27, ALL, MUL #2\n" + "cntw x26, ALL, MUL #3\n" + "12:" // Store to partial result buffer: Store and refill: Loop + ".inst 0xe0bf10e0 // st1w { za0h.s[x12] }, p4/Z, [x7, XZR, LSL #2]\n" + ".inst 0xe09f10c0 // ld1w { za0h.s[x12] }, p4/Z, [x6, XZR, LSL #2]\n" + "addvl x25, x7, #4\n" + "addvl x24, x6, #4\n" + ".inst 0xe0bc10e1 // st1w { za0h.s[x12, #1] }, p4/Z, [x7, x28, LSL #2]\n" + ".inst 0xe09c10c1 // ld1w { za0h.s[x12, #1] }, p4/Z, [x6, x28, LSL #2]\n" + "addvl x23, x7, #8\n" + "addvl x22, x6, #8\n" + ".inst 0xe0bb10e2 // st1w { za0h.s[x12, #2] }, p4/Z, [x7, x27, LSL #2]\n" + ".inst 0xe09b10c2 // ld1w { za0h.s[x12, #2] }, p4/Z, [x6, x27, LSL #2]\n" + "addvl x21, x7, #12\n" + "addvl x20, x6, #12\n" + ".inst 0xe0ba10e3 // st1w { za0h.s[x12, #3] }, p4/Z, [x7, x26, LSL #2]\n" + ".inst 0xe09a10c3 // ld1w { za0h.s[x12, #3] }, p4/Z, [x6, x26, LSL #2]\n" + "addvl x7, x7, #16\n" + "addvl x6, x6, #16\n" + ".inst 0xe0bf1324 // st1w { za1h.s[x12] }, p4/Z, [x25, XZR, LSL #2]\n" + ".inst 0xe09f1304 // ld1w { za1h.s[x12] }, p4/Z, [x24, XZR, LSL #2]\n" + ".inst 0xe0bc1325 // st1w { za1h.s[x12, #1] }, p4/Z, [x25, x28, LSL #2]\n" + ".inst 0xe09c1305 // ld1w { za1h.s[x12, #1] }, p4/Z, [x24, x28, LSL #2]\n" + ".inst 0xe0bb1326 // st1w { za1h.s[x12, #2] }, p4/Z, [x25, x27, LSL #2]\n" + ".inst 0xe09b1306 // ld1w { za1h.s[x12, #2] }, p4/Z, [x24, x27, LSL #2]\n" + ".inst 0xe0ba1327 // st1w { za1h.s[x12, #3] }, p4/Z, [x25, x26, LSL #2]\n" + ".inst 0xe09a1307 // ld1w { za1h.s[x12, #3] }, p4/Z, [x24, x26, LSL #2]\n" + ".inst 0xe0bf12e8 // st1w { za2h.s[x12] }, p4/Z, [x23, XZR, LSL #2]\n" + ".inst 0xe09f12c8 // ld1w { za2h.s[x12] }, p4/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0bc12e9 // st1w { za2h.s[x12, #1] }, p4/Z, [x23, x28, LSL #2]\n" + ".inst 0xe09c12c9 // ld1w { za2h.s[x12, #1] }, p4/Z, [x22, x28, LSL #2]\n" + ".inst 0xe0bb12ea // st1w { za2h.s[x12, #2] }, p4/Z, [x23, x27, 
LSL #2]\n" + ".inst 0xe09b12ca // ld1w { za2h.s[x12, #2] }, p4/Z, [x22, x27, LSL #2]\n" + ".inst 0xe0ba12eb // st1w { za2h.s[x12, #3] }, p4/Z, [x23, x26, LSL #2]\n" + ".inst 0xe09a12cb // ld1w { za2h.s[x12, #3] }, p4/Z, [x22, x26, LSL #2]\n" + ".inst 0xe0bf12ac // st1w { za3h.s[x12] }, p4/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f128c // ld1w { za3h.s[x12] }, p4/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0bc12ad // st1w { za3h.s[x12, #1] }, p4/Z, [x21, x28, LSL #2]\n" + ".inst 0xe09c128d // ld1w { za3h.s[x12, #1] }, p4/Z, [x20, x28, LSL #2]\n" + ".inst 0xe0bb12ae // st1w { za3h.s[x12, #2] }, p4/Z, [x21, x27, LSL #2]\n" + ".inst 0xe09b128e // ld1w { za3h.s[x12, #2] }, p4/Z, [x20, x27, LSL #2]\n" + ".inst 0xe0ba12af // st1w { za3h.s[x12, #3] }, p4/Z, [x21, x26, LSL #2]\n" + ".inst 0xe09a128f // ld1w { za3h.s[x12, #3] }, p4/Z, [x20, x26, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x9\n" + "blt 12b\n" + "b 25f\n" + "13:" // Store to partial result buffer: Store only + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "14:" // Store to partial result buffer: Store only: Loop + ".inst 0xe0bf10e0 // st1w { za0h.s[x12] }, p4/Z, [x7, XZR, LSL #2]\n" + "addvl x22, x7, #4\n" + "addvl x21, x7, #8\n" + ".inst 0xe0b910e1 // st1w { za0h.s[x12, #1] }, p4/Z, [x7, x25, LSL #2]\n" + "addvl x20, x7, #12\n" + ".inst 0xe0b810e2 // st1w { za0h.s[x12, #2] }, p4/Z, [x7, x24, LSL #2]\n" + ".inst 0xe0b710e3 // st1w { za0h.s[x12, #3] }, p4/Z, [x7, x23, LSL #2]\n" + "addvl x7, x7, #16\n" + ".inst 0xe0bf12c4 // st1w { za1h.s[x12] }, p4/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0b912c5 // st1w { za1h.s[x12, #1] }, p4/Z, [x22, x25, LSL #2]\n" + ".inst 0xe0b812c6 // st1w { za1h.s[x12, #2] }, p4/Z, [x22, x24, LSL #2]\n" + ".inst 0xe0b712c7 // st1w { za1h.s[x12, #3] }, p4/Z, [x22, x23, LSL #2]\n" + ".inst 0xe0bf12a8 // st1w { za2h.s[x12] }, p4/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe0b912a9 // st1w { za2h.s[x12, #1] }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 
0xe0b812aa // st1w { za2h.s[x12, #2] }, p4/Z, [x21, x24, LSL #2]\n" + ".inst 0xe0b712ab // st1w { za2h.s[x12, #3] }, p4/Z, [x21, x23, LSL #2]\n" + ".inst 0xe0bf128c // st1w { za3h.s[x12] }, p4/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0b9128d // st1w { za3h.s[x12, #1] }, p4/Z, [x20, x25, LSL #2]\n" + ".inst 0xe0b8128e // st1w { za3h.s[x12, #2] }, p4/Z, [x20, x24, LSL #2]\n" + ".inst 0xe0b7128f // st1w { za3h.s[x12, #3] }, p4/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 14b\n" + "b 25f\n" + "15:" // Store to output array + "ldr x25, [%x[args], %[offsetof_C]]\n" + "sub x24, x15, x17\n" + "ldr x23, [%x[args], %[offsetof_ldcb]]\n" + "add x25, x25, x16, LSL #2\n" // C += n + "madd x25, x17, x23, x25\n" // C += m * ldc + "tbz x5, #2, 19f\n" + "cntw x20\n" + "mov x12, #0x0\n" + "cmp x24, x20\n" + "csel x22, x24, x20, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 17f\n" + "16:" // Store to output array: Skip activation: Accumulator row 0 loop + ".inst 0xc0821013 // mova z19.s, p4/M, za0h.s[x12]\n" + ".inst 0xc0821092 // mova z18.s, p4/M, za1h.s[x12]\n" + "st1w { z19.s }, p3, [x25]\n" + ".inst 0xc0821111 // mova z17.s, p4/M, za2h.s[x12]\n" + "st1w { z18.s }, p2, [x25, #1, MUL VL]\n" + ".inst 0xc0821190 // mova z16.s, p4/M, za3h.s[x12]\n" + "st1w { z17.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z16.s }, p0, [x25, #3, MUL VL]\n" + ".inst 0xc082103b // mova z27.s, p4/M, za0h.s[x12, #1]\n" + ".inst 0xc082105a // mova z26.s, p4/M, za0h.s[x12, #2]\n" + ".inst 0xc0821079 // mova z25.s, p4/M, za0h.s[x12, #3]\n" + "add x25, x25, x23\n" + ".inst 0xc08210b8 // mova z24.s, p4/M, za1h.s[x12, #1]\n" + ".inst 0xc08210d7 // mova z23.s, p4/M, za1h.s[x12, #2]\n" + "st1w { z27.s }, p3, [x25]\n" + ".inst 0xc08210f6 // mova z22.s, p4/M, za1h.s[x12, #3]\n" + "st1w { z24.s }, p2, [x25, #1, MUL VL]\n" + ".inst 0xc0821135 // mova z21.s, p4/M, za2h.s[x12, #1]\n" + ".inst 0xc0821154 // mova z20.s, p4/M, za2h.s[x12, #2]\n" + "st1w { z21.s }, p1, [x25, #2, 
MUL VL]\n" + ".inst 0xc0821173 // mova z19.s, p4/M, za2h.s[x12, #3]\n" + ".inst 0xc08211b2 // mova z18.s, p4/M, za3h.s[x12, #1]\n" + ".inst 0xc08211d1 // mova z17.s, p4/M, za3h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + ".inst 0xc08211f0 // mova z16.s, p4/M, za3h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z26.s }, p3, [x25]\n" + "st1w { z23.s }, p2, [x25, #1, MUL VL]\n" + "cmp x12, x21, LSL #2\n" + "st1w { z20.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z17.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "st1w { z25.s }, p3, [x25]\n" + "st1w { z22.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z19.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z16.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "blt 16b\n" + "17:" // Store to output array: Skip activation: Accumulator row 0 oddments + "cbz x20, 18f\n" + ".inst 0xc0821010 // mova z16.s, p4/M, za0h.s[x12]\n" + ".inst 0xc082103a // mova z26.s, p4/M, za0h.s[x12, #1]\n" + "st1w { z16.s }, p3, [x25]\n" + ".inst 0xc0821059 // mova z25.s, p4/M, za0h.s[x12, #2]\n" + ".inst 0xc0821098 // mova z24.s, p4/M, za1h.s[x12]\n" + ".inst 0xc08210b7 // mova z23.s, p4/M, za1h.s[x12, #1]\n" + "st1w { z24.s }, p2, [x25, #1, MUL VL]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc08210d6 // mova z22.s, p4/M, za1h.s[x12, #2]\n" + ".inst 0xc0821115 // mova z21.s, p4/M, za2h.s[x12]\n" + ".inst 0xc0821134 // mova z20.s, p4/M, za2h.s[x12, #1]\n" + "st1w { z21.s }, p1, [x25, #2, MUL VL]\n" + ".inst 0xc0821153 // mova z19.s, p4/M, za2h.s[x12, #2]\n" + ".inst 0xc0821192 // mova z18.s, p4/M, za3h.s[x12]\n" + ".inst 0xc08211b1 // mova z17.s, p4/M, za3h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + ".inst 0xc08211d0 // mova z16.s, p4/M, za3h.s[x12, #2]\n" + "beq 18f\n" + "subs x20, x20, #0x1\n" + "st1w { z26.s }, p3, [x25]\n" + "st1w { z23.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z20.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z17.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, 
x23\n" + "beq 18f\n" + "st1w { z25.s }, p3, [x25]\n" + "st1w { z22.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z19.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z16.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "18:" // Store to output array: Skip activation: Accumulator row 0 oddments: End + "subs x24, x24, x22\n" + "beq 19f\n" + "b 23f\n" + "19:" // Store to output array: Skip activation: End + "cntw x20\n" + "ld1rw { z1.s }, p4/Z, [%x[args], %[offsetof_KernelArgs_min]]\n" + "mov x12, #0x0\n" + "cmp x24, x20\n" + "ld1rw { z0.s }, p4/Z, [%x[args], %[offsetof_KernelArgs_max]]\n" + "csel x20, x24, x20, LT\n" + "lsr x21, x20, #0x2\n" + "and x20, x20, #0x3\n" + "cbz x21, 21f\n" + "20:" // Store to output array: Accumulator row 0 loop + ".inst 0xc082101f // mova z31.s, p4/M, za0h.s[x12]\n" + ".inst 0xc082109e // mova z30.s, p4/M, za1h.s[x12]\n" + "fmin z31.s, p4/M, z31.s, z0.s\n" + ".inst 0xc082111d // mova z29.s, p4/M, za2h.s[x12]\n" + "fmin z30.s, p4/M, z30.s, z0.s\n" + ".inst 0xc082119c // mova z28.s, p4/M, za3h.s[x12]\n" + "fmin z29.s, p4/M, z29.s, z0.s\n" + ".inst 0xc082103b // mova z27.s, p4/M, za0h.s[x12, #1]\n" + "fmin z28.s, p4/M, z28.s, z0.s\n" + ".inst 0xc08210ba // mova z26.s, p4/M, za1h.s[x12, #1]\n" + "fmin z27.s, p4/M, z27.s, z0.s\n" + ".inst 0xc0821139 // mova z25.s, p4/M, za2h.s[x12, #1]\n" + "fmin z26.s, p4/M, z26.s, z0.s\n" + ".inst 0xc08211b8 // mova z24.s, p4/M, za3h.s[x12, #1]\n" + "fmin z25.s, p4/M, z25.s, z0.s\n" + "fmax z31.s, p4/M, z31.s, z1.s\n" + ".inst 0xc0821057 // mova z23.s, p4/M, za0h.s[x12, #2]\n" + "fmin z24.s, p4/M, z24.s, z0.s\n" + "fmax z30.s, p4/M, z30.s, z1.s\n" + ".inst 0xc08210d6 // mova z22.s, p4/M, za1h.s[x12, #2]\n" + "fmin z23.s, p4/M, z23.s, z0.s\n" + "fmax z29.s, p4/M, z29.s, z1.s\n" + ".inst 0xc0821155 // mova z21.s, p4/M, za2h.s[x12, #2]\n" + "fmin z22.s, p4/M, z22.s, z0.s\n" + "fmax z28.s, p4/M, z28.s, z1.s\n" + ".inst 0xc08211d4 // mova z20.s, p4/M, za3h.s[x12, #2]\n" + "fmin z21.s, p4/M, z21.s, z0.s\n" + "st1w { 
z31.s }, p3, [x25]\n" + ".inst 0xc0821073 // mova z19.s, p4/M, za0h.s[x12, #3]\n" + "fmin z20.s, p4/M, z20.s, z0.s\n" + "st1w { z30.s }, p2, [x25, #1, MUL VL]\n" + ".inst 0xc08210f2 // mova z18.s, p4/M, za1h.s[x12, #3]\n" + "fmin z19.s, p4/M, z19.s, z0.s\n" + "st1w { z29.s }, p1, [x25, #2, MUL VL]\n" + ".inst 0xc0821171 // mova z17.s, p4/M, za2h.s[x12, #3]\n" + "fmin z18.s, p4/M, z18.s, z0.s\n" + "st1w { z28.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + ".inst 0xc08211f0 // mova z16.s, p4/M, za3h.s[x12, #3]\n" + "fmin z17.s, p4/M, z17.s, z0.s\n" + "fmax z27.s, p4/M, z27.s, z1.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p4/M, z16.s, z0.s\n" + "fmax z26.s, p4/M, z26.s, z1.s\n" + "cmp x12, x21, LSL #2\n" + "fmax z25.s, p4/M, z25.s, z1.s\n" + "fmax z24.s, p4/M, z24.s, z1.s\n" + "fmax z23.s, p4/M, z23.s, z1.s\n" + "fmax z22.s, p4/M, z22.s, z1.s\n" + "st1w { z27.s }, p3, [x25]\n" + "fmax z21.s, p4/M, z21.s, z1.s\n" + "fmax z20.s, p4/M, z20.s, z1.s\n" + "st1w { z26.s }, p2, [x25, #1, MUL VL]\n" + "fmax z19.s, p4/M, z19.s, z1.s\n" + "fmax z18.s, p4/M, z18.s, z1.s\n" + "st1w { z25.s }, p1, [x25, #2, MUL VL]\n" + "fmax z17.s, p4/M, z17.s, z1.s\n" + "fmax z16.s, p4/M, z16.s, z1.s\n" + "st1w { z24.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "st1w { z23.s }, p3, [x25]\n" + "st1w { z22.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z21.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z20.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "st1w { z19.s }, p3, [x25]\n" + "st1w { z18.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z17.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z16.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "blt 20b\n" + "21:" // Store to output array: Accumulator row 0 oddments + "cbz x20, 22f\n" + ".inst 0xc082101b // mova z27.s, p4/M, za0h.s[x12]\n" + ".inst 0xc082103a // mova z26.s, p4/M, za0h.s[x12, #1]\n" + "fmin z27.s, p4/M, z27.s, z0.s\n" + ".inst 0xc0821059 // mova z25.s, p4/M, za0h.s[x12, #2]\n" + "fmin z26.s, p4/M, z26.s, z0.s\n" + ".inst 
0xc0821098 // mova z24.s, p4/M, za1h.s[x12]\n" + "fmin z25.s, p4/M, z25.s, z0.s\n" + ".inst 0xc08210b7 // mova z23.s, p4/M, za1h.s[x12, #1]\n" + "fmin z24.s, p4/M, z24.s, z0.s\n" + ".inst 0xc08210d6 // mova z22.s, p4/M, za1h.s[x12, #2]\n" + "fmin z23.s, p4/M, z23.s, z0.s\n" + "subs x20, x20, #0x1\n" + "fmax z27.s, p4/M, z27.s, z1.s\n" + ".inst 0xc0821115 // mova z21.s, p4/M, za2h.s[x12]\n" + "fmin z22.s, p4/M, z22.s, z0.s\n" + "fmax z26.s, p4/M, z26.s, z1.s\n" + ".inst 0xc0821134 // mova z20.s, p4/M, za2h.s[x12, #1]\n" + "fmin z21.s, p4/M, z21.s, z0.s\n" + "fmax z25.s, p4/M, z25.s, z1.s\n" + ".inst 0xc0821153 // mova z19.s, p4/M, za2h.s[x12, #2]\n" + "fmin z20.s, p4/M, z20.s, z0.s\n" + "fmax z24.s, p4/M, z24.s, z1.s\n" + ".inst 0xc0821192 // mova z18.s, p4/M, za3h.s[x12]\n" + "fmin z19.s, p4/M, z19.s, z0.s\n" + "fmax z23.s, p4/M, z23.s, z1.s\n" + ".inst 0xc08211b1 // mova z17.s, p4/M, za3h.s[x12, #1]\n" + "fmin z18.s, p4/M, z18.s, z0.s\n" + "fmax z22.s, p4/M, z22.s, z1.s\n" + ".inst 0xc08211d0 // mova z16.s, p4/M, za3h.s[x12, #2]\n" + "fmin z17.s, p4/M, z17.s, z0.s\n" + "fmax z21.s, p4/M, z21.s, z1.s\n" + "fmin z16.s, p4/M, z16.s, z0.s\n" + "fmax z20.s, p4/M, z20.s, z1.s\n" + "st1w { z27.s }, p3, [x25]\n" + "fmax z19.s, p4/M, z19.s, z1.s\n" + "st1w { z24.s }, p2, [x25, #1, MUL VL]\n" + "fmax z18.s, p4/M, z18.s, z1.s\n" + "fmax z17.s, p4/M, z17.s, z1.s\n" + "st1w { z21.s }, p1, [x25, #2, MUL VL]\n" + "fmax z16.s, p4/M, z16.s, z1.s\n" + "st1w { z18.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "beq 22f\n" + "subs x20, x20, #0x1\n" + "st1w { z26.s }, p3, [x25]\n" + "st1w { z23.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z20.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z17.s }, p0, [x25, #3, MUL VL]\n" + "add x25, x25, x23\n" + "beq 22f\n" + "st1w { z25.s }, p3, [x25]\n" + "st1w { z22.s }, p2, [x25, #1, MUL VL]\n" + "st1w { z19.s }, p1, [x25, #2, MUL VL]\n" + "st1w { z16.s }, p0, [x25, #3, MUL VL]\n" + "22:" // Store to output array: Accumulator row 0 oddments: End + 
"23:" // Store to output array: End + "tbz x5, #0, 25f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "24:" // Store to output array: Refill accumulators: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f10c0 // ld1w { za0h.s[x12] }, p4/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f12c4 // ld1w { za1h.s[x12] }, p4/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe09f12a8 // ld1w { za2h.s[x12] }, p4/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f128c // ld1w { za3h.s[x12] }, p4/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09910c1 // ld1w { za0h.s[x12, #1] }, p4/Z, [x6, x25, LSL #2]\n" + ".inst 0xe09912c5 // ld1w { za1h.s[x12, #1] }, p4/Z, [x22, x25, LSL #2]\n" + ".inst 0xe09912a9 // ld1w { za2h.s[x12, #1] }, p4/Z, [x21, x25, LSL #2]\n" + ".inst 0xe099128d // ld1w { za3h.s[x12, #1] }, p4/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09810c2 // ld1w { za0h.s[x12, #2] }, p4/Z, [x6, x24, LSL #2]\n" + ".inst 0xe09812c6 // ld1w { za1h.s[x12, #2] }, p4/Z, [x22, x24, LSL #2]\n" + ".inst 0xe09812aa // ld1w { za2h.s[x12, #2] }, p4/Z, [x21, x24, LSL #2]\n" + ".inst 0xe098128e // ld1w { za3h.s[x12, #2] }, p4/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09710c3 // ld1w { za0h.s[x12, #3] }, p4/Z, [x6, x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe09712c7 // ld1w { za1h.s[x12, #3] }, p4/Z, [x22, x23, LSL #2]\n" + ".inst 0xe09712ab // ld1w { za2h.s[x12, #3] }, p4/Z, [x21, x23, LSL #2]\n" + ".inst 0xe097128f // ld1w { za3h.s[x12, #3] }, p4/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 24b\n" + "25:" // End block + "incw x16, ALL, MUL #4\n" + "cmp x16, x14\n" + "blt 4b\n" + "incw x17\n" + "mov x16, #0x0\n" + "cmp x17, x15\n" + "mov x13, x10\n" + "blt 3b\n" + ".inst 0xd503467f // SMSTOP\n" + : + : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), 
[offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)) + : "cc", "memory", "p0", "p1", "p10", "p11", "p12", "p13", "p14", "p15", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x5", "x6", "x7", "x8", "x9", "z0", "z1", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z2", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z3", "z30", "z31", "z4", "z5", "z6", "z7", "z8", "z9" + ); +} + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME2 diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp new file mode 100644 index 0000000000..2f9c3a524d --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2025 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#pragma once + +#ifdef ARM_COMPUTE_ENABLE_SME + + +#include "../std_transforms_sme.hpp" + +namespace arm_gemm +{ + +// Implementations +void sme_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + +class cls_sme_interleaved_nomerge_fp32_mopa_2VLx2VL +{ +public: + typedef float lhs_operand_type; + typedef float rhs_operand_type; + typedef float result_type; + + typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + + /* Kernel blocking parameters */ + static unsigned int out_height() + { + return sme::get_vector_length() * 2; + } + + static unsigned int out_width() + { + return sme::get_vector_length() * 2; + } + + static constexpr unsigned int k_unroll() + { + return 1; + } + + static constexpr bool supports_bias() + { + return true; + } + + static constexpr bool is_sme() + { + return true; + } + + // Default to the generic kernel + kern_type kernel = sme_interleaved_nomerge_fp32_mopa_2VLx2VL; + + StdTransformsSME transforms = {}; + + cls_sme_interleaved_nomerge_fp32_mopa_2VLx2VL(const CPUInfo *) + { + } +}; + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME2 diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp new file mode 100644 index 0000000000..18cec9b188 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp @@ -0,0 +1,686 @@ +/* + * Copyright (c) 2025 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#ifdef ARM_COMPUTE_ENABLE_SME
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+  struct KernelArgs
+  {
+    KernelArgs(
+      const float *const A,
+      const float *const B,
+      float *const C, const int ldc,
+      const int M, const int N, const int K,
+      const float *const bias,
+      const Activation act,
+      bool accumulate,
+      float *const accumulator_buffer
+    ) : A(A),
+        B(B), kstride_bytes(K * sizeof(float)),
+        C(C), ldcb(ldc * sizeof(float)),
+        M(M), N(N), K(K),
+        min(-std::numeric_limits<float>::infinity()),
+        max(std::numeric_limits<float>::infinity()),
+        bias(bias),
+        accumulator_buffer(accumulator_buffer),
+        flags(0x0)
+    {
+      if (accumulate)
+      {
+        flags |= 1 << 0;  // FILL_ACCUMULATORS_FROM_BUFFER
+      }
+      if (C == nullptr)
+      {
+        flags |= 1 << 1;  // STORE_ACCUMULATORS_TO_BUFFER
+      }
+      if (act.type == Activation::Type::None)
+      {
+        flags |= 1 << 2;  // SKIP_ACTIVATION
+      }
+
+      // Initialise the activation values
+      switch (act.type)
+      {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            this->max = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            this->min = static_cast<float>(0);
+            break;
+      }
+    }
+
+    const float *const A;
+    const float *const B;
+    const long kstride_bytes;
+    float *const C;
+    const long ldcb;
+    const long M, N, K;
+    float min = -std::numeric_limits<float>::infinity();
+    float max = std::numeric_limits<float>::infinity();
+
+    const float *const bias;
+
+
+    float *const accumulator_buffer;
+    uint64_t flags;
+  };
+
+  // Construct arguments for this kernel
+  KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+  __asm__ __volatile__(
+    "ldr x5, [%x[args], %[offsetof_flags]]\n"
+    ".inst 
0xd503477f // SMSTART ZA\n" + "ptrue p2.b\n" + "ldr x6, [%x[args], %[offsetof_accumulator_buffer]]\n" + "ldr x7, [%x[args], %[offsetof_accumulator_buffer]]\n" + "tbz x5, #0, 2f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "1:" // Initial accumulator load from buffer: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f08c0 // ld1w { za0h.s[x12] }, p2/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f0ac4 // ld1w { za1h.s[x12] }, p2/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe09f0aa8 // ld1w { za2h.s[x12] }, p2/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f0a8c // ld1w { za3h.s[x12] }, p2/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09908c1 // ld1w { za0h.s[x12, #1] }, p2/Z, [x6, x25, LSL #2]\n" + ".inst 0xe0990ac5 // ld1w { za1h.s[x12, #1] }, p2/Z, [x22, x25, LSL #2]\n" + ".inst 0xe0990aa9 // ld1w { za2h.s[x12, #1] }, p2/Z, [x21, x25, LSL #2]\n" + ".inst 0xe0990a8d // ld1w { za3h.s[x12, #1] }, p2/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09808c2 // ld1w { za0h.s[x12, #2] }, p2/Z, [x6, x24, LSL #2]\n" + ".inst 0xe0980ac6 // ld1w { za1h.s[x12, #2] }, p2/Z, [x22, x24, LSL #2]\n" + ".inst 0xe0980aaa // ld1w { za2h.s[x12, #2] }, p2/Z, [x21, x24, LSL #2]\n" + ".inst 0xe0980a8e // ld1w { za3h.s[x12, #2] }, p2/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09708c3 // ld1w { za0h.s[x12, #3] }, p2/Z, [x6, x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe0970ac7 // ld1w { za1h.s[x12, #3] }, p2/Z, [x22, x23, LSL #2]\n" + ".inst 0xe0970aab // ld1w { za2h.s[x12, #3] }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0xe0970a8f // ld1w { za3h.s[x12, #3] }, p2/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 1b\n" + "2:" // Initial accumulator load from buffer: End + "ldr x8, [%x[args], %[offsetof_K]]\n" + "mov x17, #0x0\n" + "mov x16, #0x0\n" + "ldr w15, [%x[args], %[offsetof_M]]\n" + "ldr w14, [%x[args], %[offsetof_N]]\n" + "ldr x13, [%x[args], %[offsetof_A]]\n" + "3:" // M loop + "ldr x11, [%x[args], 
%[offsetof_B]]\n" + "4:" // N loop + "mov x20, x16\n" + "mov x10, x13\n" + "whilelt p1.s, x20, x14\n" + "incw x20\n" + "whilelt p0.s, x20, x14\n" + "tbnz x5, #0, 5f\n" + "ldr x20, [%x[args], %[offsetof_bias]]\n" + ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n" + "cbz x20, 6f\n" + "add x20, x20, x16, LSL #2\n" + "fmov z18.s, #1.0\n" + "ld1w { z17.s }, p1/Z, [x20]\n" + "ld1w { z16.s }, p0/Z, [x20, #1, MUL VL]\n" + ".inst 0x80914a40 // fmopa za0.s, p2/M, p2/M, z18.s, z17.s\n" + ".inst 0x80904a41 // fmopa za1.s, p2/M, p2/M, z18.s, z16.s\n" + ".inst 0x80914a42 // fmopa za2.s, p2/M, p2/M, z18.s, z17.s\n" + ".inst 0x80904a43 // fmopa za3.s, p2/M, p2/M, z18.s, z16.s\n" + "5:" // Prepare accumulators: Test for last block + "mov x20, x16\n" + "mov x21, x17\n" + "incw x20, ALL, MUL #2\n" + "incw x21, ALL, MUL #2\n" + "cmp x20, x14\n" + "mov x20, x5\n" + "csel x21, x17, x21, LT\n" + "bfm x5, XZR, #0x0, #0x0 // bfc x5, #0x0, #0x1\n" + "cmp x21, x15\n" + "csel x5, x20, x5, LT\n" + "6:" // Prepare accumulators: End + "lsr x21, x8, #0x2\n" + "and x20, x8, #0x3\n" + "cbz x21, 9f\n" + "subs x21, x21, #0x1\n" + "ld1w { z31.s }, p2/Z, [x10]\n" + "ld1w { z30.s }, p2/Z, [x10, #1, MUL VL]\n" + "ld1w { z29.s }, p2/Z, [x10, #2, MUL VL]\n" + "ld1w { z28.s }, p2/Z, [x10, #3, MUL VL]\n" + "ld1w { z27.s }, p2/Z, [x10, #4, MUL VL]\n" + "ld1w { z26.s }, p2/Z, [x10, #5, MUL VL]\n" + "ld1w { z25.s }, p2/Z, [x10, #6, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [x10, #7, MUL VL]\n" + "addvl x10, x10, #8\n" + "ld1w { z23.s }, p2/Z, [x11]\n" + "ld1w { z22.s }, p2/Z, [x11, #1, MUL VL]\n" + "ld1w { z21.s }, p2/Z, [x11, #2, MUL VL]\n" + "ld1w { z20.s }, p2/Z, [x11, #3, MUL VL]\n" + "ld1w { z19.s }, p2/Z, [x11, #4, MUL VL]\n" + "ld1w { z18.s }, p2/Z, [x11, #5, MUL VL]\n" + "ld1w { z17.s }, p2/Z, [x11, #6, MUL VL]\n" + "ld1w { z16.s }, p2/Z, [x11, #7, MUL VL]\n" + "addvl x11, x11, #8\n" + "ble 8f\n" + "7:" // K loop + ".inst 0x80974be0 // fmopa za0.s, p2/M, p2/M, z31.s, z23.s\n" + 
"subs x21, x21, #0x1\n" + ".inst 0x80964be1 // fmopa za1.s, p2/M, p2/M, z31.s, z22.s\n" + "ld1w { z31.s }, p2/Z, [x10]\n" + ".inst 0x80974bc2 // fmopa za2.s, p2/M, p2/M, z30.s, z23.s\n" + "ld1w { z23.s }, p2/Z, [x11]\n" + ".inst 0x80964bc3 // fmopa za3.s, p2/M, p2/M, z30.s, z22.s\n" + "ld1w { z30.s }, p2/Z, [x10, #1, MUL VL]\n" + ".inst 0x80954ba0 // fmopa za0.s, p2/M, p2/M, z29.s, z21.s\n" + "ld1w { z22.s }, p2/Z, [x11, #1, MUL VL]\n" + ".inst 0x80944ba1 // fmopa za1.s, p2/M, p2/M, z29.s, z20.s\n" + "ld1w { z29.s }, p2/Z, [x10, #2, MUL VL]\n" + ".inst 0x80954b82 // fmopa za2.s, p2/M, p2/M, z28.s, z21.s\n" + "ld1w { z21.s }, p2/Z, [x11, #2, MUL VL]\n" + ".inst 0x80944b83 // fmopa za3.s, p2/M, p2/M, z28.s, z20.s\n" + "ld1w { z28.s }, p2/Z, [x10, #3, MUL VL]\n" + ".inst 0x80934b60 // fmopa za0.s, p2/M, p2/M, z27.s, z19.s\n" + "ld1w { z20.s }, p2/Z, [x11, #3, MUL VL]\n" + ".inst 0x80924b61 // fmopa za1.s, p2/M, p2/M, z27.s, z18.s\n" + "ld1w { z27.s }, p2/Z, [x10, #4, MUL VL]\n" + ".inst 0x80934b42 // fmopa za2.s, p2/M, p2/M, z26.s, z19.s\n" + "ld1w { z19.s }, p2/Z, [x11, #4, MUL VL]\n" + ".inst 0x80924b43 // fmopa za3.s, p2/M, p2/M, z26.s, z18.s\n" + "ld1w { z26.s }, p2/Z, [x10, #5, MUL VL]\n" + ".inst 0x80914b20 // fmopa za0.s, p2/M, p2/M, z25.s, z17.s\n" + "ld1w { z18.s }, p2/Z, [x11, #5, MUL VL]\n" + ".inst 0x80904b21 // fmopa za1.s, p2/M, p2/M, z25.s, z16.s\n" + "ld1w { z25.s }, p2/Z, [x10, #6, MUL VL]\n" + ".inst 0x80914b02 // fmopa za2.s, p2/M, p2/M, z24.s, z17.s\n" + "ld1w { z17.s }, p2/Z, [x11, #6, MUL VL]\n" + ".inst 0x80904b03 // fmopa za3.s, p2/M, p2/M, z24.s, z16.s\n" + "ld1w { z24.s }, p2/Z, [x10, #7, MUL VL]\n" + "addvl x10, x10, #8\n" + "ld1w { z16.s }, p2/Z, [x11, #7, MUL VL]\n" + "addvl x11, x11, #8\n" + "bgt 7b\n" + "8:" // K loop tail + ".inst 0x80974be0 // fmopa za0.s, p2/M, p2/M, z31.s, z23.s\n" + ".inst 0x80964be1 // fmopa za1.s, p2/M, p2/M, z31.s, z22.s\n" + ".inst 0x80974bc2 // fmopa za2.s, p2/M, p2/M, z30.s, z23.s\n" + ".inst 0x80964bc3 // 
fmopa za3.s, p2/M, p2/M, z30.s, z22.s\n" + ".inst 0x80954ba0 // fmopa za0.s, p2/M, p2/M, z29.s, z21.s\n" + ".inst 0x80944ba1 // fmopa za1.s, p2/M, p2/M, z29.s, z20.s\n" + ".inst 0x80954b82 // fmopa za2.s, p2/M, p2/M, z28.s, z21.s\n" + ".inst 0x80944b83 // fmopa za3.s, p2/M, p2/M, z28.s, z20.s\n" + ".inst 0x80934b60 // fmopa za0.s, p2/M, p2/M, z27.s, z19.s\n" + ".inst 0x80924b61 // fmopa za1.s, p2/M, p2/M, z27.s, z18.s\n" + ".inst 0x80934b42 // fmopa za2.s, p2/M, p2/M, z26.s, z19.s\n" + ".inst 0x80924b43 // fmopa za3.s, p2/M, p2/M, z26.s, z18.s\n" + ".inst 0x80914b20 // fmopa za0.s, p2/M, p2/M, z25.s, z17.s\n" + ".inst 0x80904b21 // fmopa za1.s, p2/M, p2/M, z25.s, z16.s\n" + ".inst 0x80914b02 // fmopa za2.s, p2/M, p2/M, z24.s, z17.s\n" + ".inst 0x80904b03 // fmopa za3.s, p2/M, p2/M, z24.s, z16.s\n" + "9:" // K oddments + "cbz x20, 11f\n" + "10:" // K oddments: Loop + "ld1w { z19.s }, p2/Z, [x10]\n" + "subs x20, x20, #0x1\n" + "ld1w { z18.s }, p2/Z, [x10, #1, MUL VL]\n" + "addvl x10, x10, #2\n" + "ld1w { z17.s }, p2/Z, [x11]\n" + "ld1w { z16.s }, p2/Z, [x11, #1, MUL VL]\n" + "addvl x11, x11, #2\n" + ".inst 0x80914a60 // fmopa za0.s, p2/M, p2/M, z19.s, z17.s\n" + ".inst 0x80904a61 // fmopa za1.s, p2/M, p2/M, z19.s, z16.s\n" + ".inst 0x80914a42 // fmopa za2.s, p2/M, p2/M, z18.s, z17.s\n" + ".inst 0x80904a43 // fmopa za3.s, p2/M, p2/M, z18.s, z16.s\n" + "bgt 10b\n" + "11:" // K oddments: End + "tbz x5, #1, 15f\n" + "tbz x5, #0, 13f\n" + "mov x12, #0x0\n" + "cntw x9\n" + "cntw x28\n" + "cntw x27, ALL, MUL #2\n" + "cntw x26, ALL, MUL #3\n" + "12:" // Store to partial result buffer: Store and refill: Loop + ".inst 0xe0bf08e0 // st1w { za0h.s[x12] }, p2/Z, [x7, XZR, LSL #2]\n" + ".inst 0xe09f08c0 // ld1w { za0h.s[x12] }, p2/Z, [x6, XZR, LSL #2]\n" + "addvl x25, x7, #4\n" + "addvl x24, x6, #4\n" + ".inst 0xe0bc08e1 // st1w { za0h.s[x12, #1] }, p2/Z, [x7, x28, LSL #2]\n" + ".inst 0xe09c08c1 // ld1w { za0h.s[x12, #1] }, p2/Z, [x6, x28, LSL #2]\n" + "addvl x23, x7, #8\n" + 
"addvl x22, x6, #8\n" + ".inst 0xe0bb08e2 // st1w { za0h.s[x12, #2] }, p2/Z, [x7, x27, LSL #2]\n" + ".inst 0xe09b08c2 // ld1w { za0h.s[x12, #2] }, p2/Z, [x6, x27, LSL #2]\n" + "addvl x21, x7, #12\n" + "addvl x20, x6, #12\n" + ".inst 0xe0ba08e3 // st1w { za0h.s[x12, #3] }, p2/Z, [x7, x26, LSL #2]\n" + ".inst 0xe09a08c3 // ld1w { za0h.s[x12, #3] }, p2/Z, [x6, x26, LSL #2]\n" + "addvl x7, x7, #16\n" + "addvl x6, x6, #16\n" + ".inst 0xe0bf0b24 // st1w { za1h.s[x12] }, p2/Z, [x25, XZR, LSL #2]\n" + ".inst 0xe09f0b04 // ld1w { za1h.s[x12] }, p2/Z, [x24, XZR, LSL #2]\n" + ".inst 0xe0bc0b25 // st1w { za1h.s[x12, #1] }, p2/Z, [x25, x28, LSL #2]\n" + ".inst 0xe09c0b05 // ld1w { za1h.s[x12, #1] }, p2/Z, [x24, x28, LSL #2]\n" + ".inst 0xe0bb0b26 // st1w { za1h.s[x12, #2] }, p2/Z, [x25, x27, LSL #2]\n" + ".inst 0xe09b0b06 // ld1w { za1h.s[x12, #2] }, p2/Z, [x24, x27, LSL #2]\n" + ".inst 0xe0ba0b27 // st1w { za1h.s[x12, #3] }, p2/Z, [x25, x26, LSL #2]\n" + ".inst 0xe09a0b07 // ld1w { za1h.s[x12, #3] }, p2/Z, [x24, x26, LSL #2]\n" + ".inst 0xe0bf0ae8 // st1w { za2h.s[x12] }, p2/Z, [x23, XZR, LSL #2]\n" + ".inst 0xe09f0ac8 // ld1w { za2h.s[x12] }, p2/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0bc0ae9 // st1w { za2h.s[x12, #1] }, p2/Z, [x23, x28, LSL #2]\n" + ".inst 0xe09c0ac9 // ld1w { za2h.s[x12, #1] }, p2/Z, [x22, x28, LSL #2]\n" + ".inst 0xe0bb0aea // st1w { za2h.s[x12, #2] }, p2/Z, [x23, x27, LSL #2]\n" + ".inst 0xe09b0aca // ld1w { za2h.s[x12, #2] }, p2/Z, [x22, x27, LSL #2]\n" + ".inst 0xe0ba0aeb // st1w { za2h.s[x12, #3] }, p2/Z, [x23, x26, LSL #2]\n" + ".inst 0xe09a0acb // ld1w { za2h.s[x12, #3] }, p2/Z, [x22, x26, LSL #2]\n" + ".inst 0xe0bf0aac // st1w { za3h.s[x12] }, p2/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f0a8c // ld1w { za3h.s[x12] }, p2/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0bc0aad // st1w { za3h.s[x12, #1] }, p2/Z, [x21, x28, LSL #2]\n" + ".inst 0xe09c0a8d // ld1w { za3h.s[x12, #1] }, p2/Z, [x20, x28, LSL #2]\n" + ".inst 0xe0bb0aae // st1w { za3h.s[x12, #2] }, p2/Z, 
[x21, x27, LSL #2]\n" + ".inst 0xe09b0a8e // ld1w { za3h.s[x12, #2] }, p2/Z, [x20, x27, LSL #2]\n" + ".inst 0xe0ba0aaf // st1w { za3h.s[x12, #3] }, p2/Z, [x21, x26, LSL #2]\n" + ".inst 0xe09a0a8f // ld1w { za3h.s[x12, #3] }, p2/Z, [x20, x26, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x9\n" + "blt 12b\n" + "b 31f\n" + "13:" // Store to partial result buffer: Store only + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "14:" // Store to partial result buffer: Store only: Loop + ".inst 0xe0bf08e0 // st1w { za0h.s[x12] }, p2/Z, [x7, XZR, LSL #2]\n" + "addvl x22, x7, #4\n" + "addvl x21, x7, #8\n" + ".inst 0xe0b908e1 // st1w { za0h.s[x12, #1] }, p2/Z, [x7, x25, LSL #2]\n" + "addvl x20, x7, #12\n" + ".inst 0xe0b808e2 // st1w { za0h.s[x12, #2] }, p2/Z, [x7, x24, LSL #2]\n" + ".inst 0xe0b708e3 // st1w { za0h.s[x12, #3] }, p2/Z, [x7, x23, LSL #2]\n" + "addvl x7, x7, #16\n" + ".inst 0xe0bf0ac4 // st1w { za1h.s[x12] }, p2/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0b90ac5 // st1w { za1h.s[x12, #1] }, p2/Z, [x22, x25, LSL #2]\n" + ".inst 0xe0b80ac6 // st1w { za1h.s[x12, #2] }, p2/Z, [x22, x24, LSL #2]\n" + ".inst 0xe0b70ac7 // st1w { za1h.s[x12, #3] }, p2/Z, [x22, x23, LSL #2]\n" + ".inst 0xe0bf0aa8 // st1w { za2h.s[x12] }, p2/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe0b90aa9 // st1w { za2h.s[x12, #1] }, p2/Z, [x21, x25, LSL #2]\n" + ".inst 0xe0b80aaa // st1w { za2h.s[x12, #2] }, p2/Z, [x21, x24, LSL #2]\n" + ".inst 0xe0b70aab // st1w { za2h.s[x12, #3] }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0xe0bf0a8c // st1w { za3h.s[x12] }, p2/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0b90a8d // st1w { za3h.s[x12, #1] }, p2/Z, [x20, x25, LSL #2]\n" + ".inst 0xe0b80a8e // st1w { za3h.s[x12, #2] }, p2/Z, [x20, x24, LSL #2]\n" + ".inst 0xe0b70a8f // st1w { za3h.s[x12, #3] }, p2/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 14b\n" + "b 31f\n" + "15:" // Store to output array + "ldr x26, [%x[args], %[offsetof_C]]\n" + 
"sub x25, x15, x17\n" + "ldr x24, [%x[args], %[offsetof_ldcb]]\n" + "add x26, x26, x16, LSL #2\n" // C += n + "madd x26, x17, x24, x26\n" // C += m * ldc + "tbz x5, #2, 22f\n" + "cntw x23\n" + "mov x12, #0x0\n" + "cmp x25, x23\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 17f\n" + "16:" // Store to output array: Skip activation: Accumulator row 0 loop + ".inst 0xc0820810 // mova z16.s, p2/M, za0h.s[x12]\n" + ".inst 0xc0820896 // mova z22.s, p2/M, za1h.s[x12]\n" + "st1w { z16.s }, p1, [x26]\n" + "st1w { z22.s }, p0, [x26, #1, MUL VL]\n" + ".inst 0xc0820835 // mova z21.s, p2/M, za0h.s[x12, #1]\n" + "add x26, x26, x24\n" + ".inst 0xc0820854 // mova z20.s, p2/M, za0h.s[x12, #2]\n" + ".inst 0xc0820873 // mova z19.s, p2/M, za0h.s[x12, #3]\n" + "st1w { z21.s }, p1, [x26]\n" + ".inst 0xc08208b2 // mova z18.s, p2/M, za1h.s[x12, #1]\n" + ".inst 0xc08208d1 // mova z17.s, p2/M, za1h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + ".inst 0xc08208f0 // mova z16.s, p2/M, za1h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "blt 16b\n" + "17:" // Store to output array: Skip activation: Accumulator row 0 oddments + "cbz x20, 18f\n" + ".inst 0xc0820815 // mova z21.s, p2/M, za0h.s[x12]\n" + ".inst 0xc0820834 // mova z20.s, p2/M, za0h.s[x12, #1]\n" + "st1w { z21.s }, p1, [x26]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820853 // mova z19.s, p2/M, za0h.s[x12, #2]\n" + ".inst 0xc0820892 // mova z18.s, p2/M, za1h.s[x12]\n" + ".inst 0xc08208b1 // mova z17.s, p2/M, za1h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + ".inst 0xc08208d0 // mova z16.s, p2/M, za1h.s[x12, #2]\n" + "beq 18f\n" + "subs x20, x20, #0x1\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { 
z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 18f\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "18:" // Store to output array: Skip activation: Accumulator row 0 oddments: End + "subs x25, x25, x22\n" + "beq 22f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 20f\n" + "19:" // Store to output array: Skip activation: Accumulator row 1 loop + ".inst 0xc0820910 // mova z16.s, p2/M, za2h.s[x12]\n" + ".inst 0xc0820996 // mova z22.s, p2/M, za3h.s[x12]\n" + "st1w { z16.s }, p1, [x26]\n" + "st1w { z22.s }, p0, [x26, #1, MUL VL]\n" + ".inst 0xc0820935 // mova z21.s, p2/M, za2h.s[x12, #1]\n" + "add x26, x26, x24\n" + ".inst 0xc0820954 // mova z20.s, p2/M, za2h.s[x12, #2]\n" + ".inst 0xc0820973 // mova z19.s, p2/M, za2h.s[x12, #3]\n" + "st1w { z21.s }, p1, [x26]\n" + ".inst 0xc08209b2 // mova z18.s, p2/M, za3h.s[x12, #1]\n" + ".inst 0xc08209d1 // mova z17.s, p2/M, za3h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + ".inst 0xc08209f0 // mova z16.s, p2/M, za3h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "blt 19b\n" + "20:" // Store to output array: Skip activation: Accumulator row 1 oddments + "cbz x20, 21f\n" + ".inst 0xc0820915 // mova z21.s, p2/M, za2h.s[x12]\n" + ".inst 0xc0820934 // mova z20.s, p2/M, za2h.s[x12, #1]\n" + "st1w { z21.s }, p1, [x26]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820953 // mova z19.s, p2/M, za2h.s[x12, #2]\n" + ".inst 0xc0820992 // mova z18.s, p2/M, za3h.s[x12]\n" + ".inst 0xc08209b1 // mova z17.s, p2/M, za3h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + ".inst 0xc08209d0 // mova z16.s, p2/M, 
za3h.s[x12, #2]\n" + "beq 21f\n" + "subs x20, x20, #0x1\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 21f\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "21:" // Store to output array: Skip activation: Accumulator row 1 oddments: End + "subs x25, x25, x22\n" + "beq 22f\n" + "b 29f\n" + "22:" // Store to output array: Skip activation: End + "cntw x23\n" + "ld1rw { z25.s }, p2/Z, [%x[args], %[offsetof_KernelArgs_min]]\n" + "mov x12, #0x0\n" + "cmp x25, x23\n" + "ld1rw { z24.s }, p2/Z, [%x[args], %[offsetof_KernelArgs_max]]\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 24f\n" + "23:" // Store to output array: Accumulator row 0 loop + ".inst 0xc0820817 // mova z23.s, p2/M, za0h.s[x12]\n" + ".inst 0xc0820896 // mova z22.s, p2/M, za1h.s[x12]\n" + "fmin z23.s, p2/M, z23.s, z24.s\n" + ".inst 0xc0820835 // mova z21.s, p2/M, za0h.s[x12, #1]\n" + "fmin z22.s, p2/M, z22.s, z24.s\n" + ".inst 0xc08208b4 // mova z20.s, p2/M, za1h.s[x12, #1]\n" + "fmin z21.s, p2/M, z21.s, z24.s\n" + ".inst 0xc0820853 // mova z19.s, p2/M, za0h.s[x12, #2]\n" + "fmin z20.s, p2/M, z20.s, z24.s\n" + ".inst 0xc08208d2 // mova z18.s, p2/M, za1h.s[x12, #2]\n" + "fmin z19.s, p2/M, z19.s, z24.s\n" + "fmax z23.s, p2/M, z23.s, z25.s\n" + ".inst 0xc0820871 // mova z17.s, p2/M, za0h.s[x12, #3]\n" + "fmin z18.s, p2/M, z18.s, z24.s\n" + "fmax z22.s, p2/M, z22.s, z25.s\n" + ".inst 0xc08208f0 // mova z16.s, p2/M, za1h.s[x12, #3]\n" + "fmin z17.s, p2/M, z17.s, z24.s\n" + "fmax z21.s, p2/M, z21.s, z25.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p2/M, z16.s, z24.s\n" + "fmax z20.s, p2/M, z20.s, z25.s\n" + "cmp x12, x21, LSL #2\n" + "st1w { z23.s }, p1, [x26]\n" + "fmax z19.s, p2/M, z19.s, z25.s\n" + "st1w { z22.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "fmax z18.s, p2/M, z18.s, z25.s\n" + "st1w { z21.s }, p1, [x26]\n" + "fmax z17.s, 
p2/M, z17.s, z25.s\n" + "st1w { z20.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "fmax z16.s, p2/M, z16.s, z25.s\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "blt 23b\n" + "24:" // Store to output array: Accumulator row 0 oddments + "cbz x20, 25f\n" + ".inst 0xc0820815 // mova z21.s, p2/M, za0h.s[x12]\n" + ".inst 0xc0820834 // mova z20.s, p2/M, za0h.s[x12, #1]\n" + "fmin z21.s, p2/M, z21.s, z24.s\n" + ".inst 0xc0820853 // mova z19.s, p2/M, za0h.s[x12, #2]\n" + "fmin z20.s, p2/M, z20.s, z24.s\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820892 // mova z18.s, p2/M, za1h.s[x12]\n" + "fmin z19.s, p2/M, z19.s, z24.s\n" + ".inst 0xc08208b1 // mova z17.s, p2/M, za1h.s[x12, #1]\n" + "fmin z18.s, p2/M, z18.s, z24.s\n" + ".inst 0xc08208d0 // mova z16.s, p2/M, za1h.s[x12, #2]\n" + "fmin z17.s, p2/M, z17.s, z24.s\n" + "fmax z21.s, p2/M, z21.s, z25.s\n" + "fmin z16.s, p2/M, z16.s, z24.s\n" + "fmax z20.s, p2/M, z20.s, z25.s\n" + "fmax z19.s, p2/M, z19.s, z25.s\n" + "fmax z18.s, p2/M, z18.s, z25.s\n" + "fmax z17.s, p2/M, z17.s, z25.s\n" + "st1w { z21.s }, p1, [x26]\n" + "fmax z16.s, p2/M, z16.s, z25.s\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 25f\n" + "subs x20, x20, #0x1\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 25f\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "25:" // Store to output array: Accumulator row 0 oddments: End + "subs x25, x25, x22\n" + "beq 29f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x20, x25, x23, LT\n" + "lsr x21, x20, #0x2\n" + "and x20, x20, #0x3\n" + "cbz x21, 27f\n" + "26:" // Store to output array: Accumulator row 1 loop + ".inst 0xc0820917 // mova z23.s, p2/M, za2h.s[x12]\n" + ".inst 0xc0820996 // mova z22.s, p2/M, 
za3h.s[x12]\n" + "fmin z23.s, p2/M, z23.s, z24.s\n" + ".inst 0xc0820935 // mova z21.s, p2/M, za2h.s[x12, #1]\n" + "fmin z22.s, p2/M, z22.s, z24.s\n" + ".inst 0xc08209b4 // mova z20.s, p2/M, za3h.s[x12, #1]\n" + "fmin z21.s, p2/M, z21.s, z24.s\n" + ".inst 0xc0820953 // mova z19.s, p2/M, za2h.s[x12, #2]\n" + "fmin z20.s, p2/M, z20.s, z24.s\n" + ".inst 0xc08209d2 // mova z18.s, p2/M, za3h.s[x12, #2]\n" + "fmin z19.s, p2/M, z19.s, z24.s\n" + "fmax z23.s, p2/M, z23.s, z25.s\n" + ".inst 0xc0820971 // mova z17.s, p2/M, za2h.s[x12, #3]\n" + "fmin z18.s, p2/M, z18.s, z24.s\n" + "fmax z22.s, p2/M, z22.s, z25.s\n" + ".inst 0xc08209f0 // mova z16.s, p2/M, za3h.s[x12, #3]\n" + "fmin z17.s, p2/M, z17.s, z24.s\n" + "fmax z21.s, p2/M, z21.s, z25.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p2/M, z16.s, z24.s\n" + "fmax z20.s, p2/M, z20.s, z25.s\n" + "cmp x12, x21, LSL #2\n" + "st1w { z23.s }, p1, [x26]\n" + "fmax z19.s, p2/M, z19.s, z25.s\n" + "st1w { z22.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "fmax z18.s, p2/M, z18.s, z25.s\n" + "st1w { z21.s }, p1, [x26]\n" + "fmax z17.s, p2/M, z17.s, z25.s\n" + "st1w { z20.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "fmax z16.s, p2/M, z16.s, z25.s\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "blt 26b\n" + "27:" // Store to output array: Accumulator row 1 oddments + "cbz x20, 28f\n" + ".inst 0xc0820915 // mova z21.s, p2/M, za2h.s[x12]\n" + ".inst 0xc0820934 // mova z20.s, p2/M, za2h.s[x12, #1]\n" + "fmin z21.s, p2/M, z21.s, z24.s\n" + ".inst 0xc0820953 // mova z19.s, p2/M, za2h.s[x12, #2]\n" + "fmin z20.s, p2/M, z20.s, z24.s\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820992 // mova z18.s, p2/M, za3h.s[x12]\n" + "fmin z19.s, p2/M, z19.s, z24.s\n" + ".inst 0xc08209b1 // mova z17.s, p2/M, za3h.s[x12, #1]\n" + "fmin z18.s, p2/M, z18.s, z24.s\n" + ".inst 0xc08209d0 
// mova z16.s, p2/M, za3h.s[x12, #2]\n" + "fmin z17.s, p2/M, z17.s, z24.s\n" + "fmax z21.s, p2/M, z21.s, z25.s\n" + "fmin z16.s, p2/M, z16.s, z24.s\n" + "fmax z20.s, p2/M, z20.s, z25.s\n" + "fmax z19.s, p2/M, z19.s, z25.s\n" + "fmax z18.s, p2/M, z18.s, z25.s\n" + "fmax z17.s, p2/M, z17.s, z25.s\n" + "st1w { z21.s }, p1, [x26]\n" + "fmax z16.s, p2/M, z16.s, z25.s\n" + "st1w { z18.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 28f\n" + "subs x20, x20, #0x1\n" + "st1w { z20.s }, p1, [x26]\n" + "st1w { z17.s }, p0, [x26, #1, MUL VL]\n" + "add x26, x26, x24\n" + "beq 28f\n" + "st1w { z19.s }, p1, [x26]\n" + "st1w { z16.s }, p0, [x26, #1, MUL VL]\n" + "28:" // Store to output array: Accumulator row 1 oddments: End + "29:" // Store to output array: End + "tbz x5, #0, 31f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "30:" // Store to output array: Refill accumulators: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f08c0 // ld1w { za0h.s[x12] }, p2/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f0ac4 // ld1w { za1h.s[x12] }, p2/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe09f0aa8 // ld1w { za2h.s[x12] }, p2/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f0a8c // ld1w { za3h.s[x12] }, p2/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09908c1 // ld1w { za0h.s[x12, #1] }, p2/Z, [x6, x25, LSL #2]\n" + ".inst 0xe0990ac5 // ld1w { za1h.s[x12, #1] }, p2/Z, [x22, x25, LSL #2]\n" + ".inst 0xe0990aa9 // ld1w { za2h.s[x12, #1] }, p2/Z, [x21, x25, LSL #2]\n" + ".inst 0xe0990a8d // ld1w { za3h.s[x12, #1] }, p2/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09808c2 // ld1w { za0h.s[x12, #2] }, p2/Z, [x6, x24, LSL #2]\n" + ".inst 0xe0980ac6 // ld1w { za1h.s[x12, #2] }, p2/Z, [x22, x24, LSL #2]\n" + ".inst 0xe0980aaa // ld1w { za2h.s[x12, #2] }, p2/Z, [x21, x24, LSL #2]\n" + ".inst 0xe0980a8e // ld1w { za3h.s[x12, #2] }, p2/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09708c3 // ld1w { za0h.s[x12, #3] }, p2/Z, [x6, 
x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe0970ac7 // ld1w { za1h.s[x12, #3] }, p2/Z, [x22, x23, LSL #2]\n" + ".inst 0xe0970aab // ld1w { za2h.s[x12, #3] }, p2/Z, [x21, x23, LSL #2]\n" + ".inst 0xe0970a8f // ld1w { za3h.s[x12, #3] }, p2/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 30b\n" + "31:" // End block + "incw x16, ALL, MUL #2\n" + "cmp x16, x14\n" + "blt 4b\n" + "incw x17, ALL, MUL #2\n" + "mov x16, #0x0\n" + "cmp x17, x15\n" + "mov x13, x10\n" + "blt 3b\n" + ".inst 0xd503467f // SMSTOP\n" + : + : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)) + : "cc", "memory", "p0", "p1", "p10", "p11", "p12", "p13", "p14", "p15", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x5", "x6", "x7", "x8", "x9", "z0", "z1", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z2", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z3", "z30", "z31", "z4", "z5", "z6", "z7", "z8", "z9" + ); +} + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME2 diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp new file mode 100644 index 0000000000..bc2e14be4d --- /dev/null +++ 
b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2025 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#pragma once + +#ifdef ARM_COMPUTE_ENABLE_SME + + +#include "../std_transforms_sme.hpp" + +namespace arm_gemm +{ + +// Implementations +void sme_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + +class cls_sme_interleaved_nomerge_fp32_mopa_4VLx1VL +{ +public: + typedef float lhs_operand_type; + typedef float rhs_operand_type; + typedef float result_type; + + typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer); + + /* Kernel blocking parameters */ + static unsigned int out_height() + { + return sme::get_vector_length<float>() * 4; + } + + static unsigned int out_width() + { + return sme::get_vector_length<float>() * 1; + } + + static constexpr unsigned int k_unroll() + { + return 1; + } + + static constexpr bool supports_bias() + { + return true; + } + + static constexpr bool is_sme() + { + return true; + } + + // Default to the generic kernel + kern_type kernel = sme_interleaved_nomerge_fp32_mopa_4VLx1VL; + + StdTransformsSME<lhs_operand_type, rhs_operand_type, result_type, 4, 1, 1> transforms = {}; + + cls_sme_interleaved_nomerge_fp32_mopa_4VLx1VL(const CPUInfo *) + { + } +}; + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp new file mode 100644 index 0000000000..6a22759c62 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/kernels/sme_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp @@ -0,0 +1,798 @@ +/* + * Copyright (c) 2025 Arm Limited.
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
 */ +#ifdef ARM_COMPUTE_ENABLE_SME + +#include "arm_gemm.hpp" + + +#include "../../asmlib.hpp" +#include "../../utils.hpp" + +namespace arm_gemm { + +void sme_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer) +{ + struct KernelArgs + { + KernelArgs( + const float *const A, + const float *const B, + float *const C, const int ldc, + const int M, const int N, const int K, + const float *const bias, + const Activation act, + bool accumulate, + float *const accumulator_buffer + ) : A(A), + B(B), kstride_bytes(K * sizeof(float)), + C(C), ldcb(ldc * sizeof(float)), + M(M), N(N), K(K), + min(-std::numeric_limits<float>::infinity()), + max(std::numeric_limits<float>::infinity()), + bias(bias), + accumulator_buffer(accumulator_buffer), + flags(0x0) + { + if (accumulate) + { + flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER + } + if (C == nullptr) + { + flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER + } + if (act.type == Activation::Type::None) + { + flags |= 1 << 2; // SKIP_ACTIVATION + } + + // Initialise the activation values + switch (act.type) + { + default: + case Activation::Type::None: + break; + case Activation::Type::BoundedReLU: + this->max = static_cast<float>(act.param1); + /* fall through */ + case Activation::Type::ReLU: + this->min = static_cast<float>(0); + break; + } + } + + const float *const A; + const float *const B; + const long kstride_bytes; + float *const C; + const long ldcb; + const long M, N, K; + float min = -std::numeric_limits<float>::infinity(); + float max = std::numeric_limits<float>::infinity(); + + const float *const bias; + + + float *const accumulator_buffer; + uint64_t flags; + }; + + // Construct arguments for this kernel + KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer); + + __asm__ __volatile__( + "ldr x5, [%x[args], %[offsetof_flags]]\n" + ".inst
0xd503477f // SMSTART ZA\n" + "ptrue p1.b\n" + "ldr x6, [%x[args], %[offsetof_accumulator_buffer]]\n" + "ldr x7, [%x[args], %[offsetof_accumulator_buffer]]\n" + "tbz x5, #0, 2f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "1:" // Initial accumulator load from buffer: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f04c0 // ld1w { za0h.s[x12] }, p1/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f06c4 // ld1w { za1h.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe09f06a8 // ld1w { za2h.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f068c // ld1w { za3h.s[x12] }, p1/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09904c1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x6, x25, LSL #2]\n" + ".inst 0xe09906c5 // ld1w { za1h.s[x12, #1] }, p1/Z, [x22, x25, LSL #2]\n" + ".inst 0xe09906a9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x21, x25, LSL #2]\n" + ".inst 0xe099068d // ld1w { za3h.s[x12, #1] }, p1/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09804c2 // ld1w { za0h.s[x12, #2] }, p1/Z, [x6, x24, LSL #2]\n" + ".inst 0xe09806c6 // ld1w { za1h.s[x12, #2] }, p1/Z, [x22, x24, LSL #2]\n" + ".inst 0xe09806aa // ld1w { za2h.s[x12, #2] }, p1/Z, [x21, x24, LSL #2]\n" + ".inst 0xe098068e // ld1w { za3h.s[x12, #2] }, p1/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09704c3 // ld1w { za0h.s[x12, #3] }, p1/Z, [x6, x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe09706c7 // ld1w { za1h.s[x12, #3] }, p1/Z, [x22, x23, LSL #2]\n" + ".inst 0xe09706ab // ld1w { za2h.s[x12, #3] }, p1/Z, [x21, x23, LSL #2]\n" + ".inst 0xe097068f // ld1w { za3h.s[x12, #3] }, p1/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 1b\n" + "2:" // Initial accumulator load from buffer: End + "ldr x8, [%x[args], %[offsetof_K]]\n" + "mov x17, #0x0\n" + "mov x16, #0x0\n" + "ldr w15, [%x[args], %[offsetof_M]]\n" + "ldr w14, [%x[args], %[offsetof_N]]\n" + "ldr x13, [%x[args], %[offsetof_A]]\n" + "3:" // M loop + "ldr x11, [%x[args], 
%[offsetof_B]]\n" + "4:" // N loop + "mov x10, x13\n" + "whilelt p0.s, x16, x14\n" + "tbnz x5, #0, 5f\n" + "ldr x20, [%x[args], %[offsetof_bias]]\n" + ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n" + "cbz x20, 6f\n" + "fmov z17.s, #1.0\n" + "ld1w { z16.s }, p0/Z, [x20, x16, LSL #2]\n" + ".inst 0x80902620 // fmopa za0.s, p1/M, p1/M, z17.s, z16.s\n" + ".inst 0x80902621 // fmopa za1.s, p1/M, p1/M, z17.s, z16.s\n" + ".inst 0x80902622 // fmopa za2.s, p1/M, p1/M, z17.s, z16.s\n" + ".inst 0x80902623 // fmopa za3.s, p1/M, p1/M, z17.s, z16.s\n" + "5:" // Prepare accumulators: Test for last block + "mov x20, x16\n" + "mov x21, x17\n" + "incw x20\n" + "incw x21, ALL, MUL #4\n" + "cmp x20, x14\n" + "mov x20, x5\n" + "csel x21, x17, x21, LT\n" + "bfm x5, XZR, #0x0, #0x0 // bfc x5, #0x0, #0x1\n" + "cmp x21, x15\n" + "csel x5, x20, x5, LT\n" + "6:" // Prepare accumulators: End + "lsr x23, x8, #0x2\n" + "and x22, x8, #0x3\n" + "cbz x23, 9f\n" + "addvl x21, x10, #8\n" + "addvl x20, x10, #12\n" + "ld1w { z3.s }, p1/Z, [x10]\n" + "subs x23, x23, #0x1\n" + "ld1w { z2.s }, p1/Z, [x10, #1, MUL VL]\n" + "ld1w { z1.s }, p1/Z, [x10, #2, MUL VL]\n" + "ld1w { z0.s }, p1/Z, [x10, #3, MUL VL]\n" + "ld1w { z31.s }, p1/Z, [x10, #4, MUL VL]\n" + "ld1w { z30.s }, p1/Z, [x10, #5, MUL VL]\n" + "ld1w { z29.s }, p1/Z, [x10, #6, MUL VL]\n" + "ld1w { z28.s }, p1/Z, [x10, #7, MUL VL]\n" + "addvl x10, x10, #16\n" + "ld1w { z27.s }, p1/Z, [x21]\n" + "ld1w { z26.s }, p1/Z, [x21, #1, MUL VL]\n" + "ld1w { z25.s }, p1/Z, [x21, #2, MUL VL]\n" + "ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [x20]\n" + "ld1w { z22.s }, p1/Z, [x20, #1, MUL VL]\n" + "ld1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n" + "ld1w { z20.s }, p1/Z, [x20, #3, MUL VL]\n" + "ld1w { z19.s }, p1/Z, [x11]\n" + "ld1w { z18.s }, p1/Z, [x11, #1, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [x11, #2, MUL VL]\n" + "ld1w { z16.s }, p1/Z, [x11, #3, MUL VL]\n" + "addvl x11, x11, #4\n" + "ble 8f\n" + "7:" // K loop 
+ ".inst 0x80932460 // fmopa za0.s, p1/M, p1/M, z3.s, z19.s\n" + "addvl x21, x10, #8\n" + "addvl x20, x10, #12\n" + "ld1w { z3.s }, p1/Z, [x10]\n" + ".inst 0x80932441 // fmopa za1.s, p1/M, p1/M, z2.s, z19.s\n" + "subs x23, x23, #0x1\n" + "ld1w { z2.s }, p1/Z, [x10, #1, MUL VL]\n" + ".inst 0x80932422 // fmopa za2.s, p1/M, p1/M, z1.s, z19.s\n" + "ld1w { z1.s }, p1/Z, [x10, #2, MUL VL]\n" + ".inst 0x80932403 // fmopa za3.s, p1/M, p1/M, z0.s, z19.s\n" + "ld1w { z0.s }, p1/Z, [x10, #3, MUL VL]\n" + ".inst 0x809227e0 // fmopa za0.s, p1/M, p1/M, z31.s, z18.s\n" + "ld1w { z31.s }, p1/Z, [x10, #4, MUL VL]\n" + ".inst 0x809227c1 // fmopa za1.s, p1/M, p1/M, z30.s, z18.s\n" + "ld1w { z30.s }, p1/Z, [x10, #5, MUL VL]\n" + ".inst 0x809227a2 // fmopa za2.s, p1/M, p1/M, z29.s, z18.s\n" + "ld1w { z29.s }, p1/Z, [x10, #6, MUL VL]\n" + ".inst 0x80922783 // fmopa za3.s, p1/M, p1/M, z28.s, z18.s\n" + "ld1w { z28.s }, p1/Z, [x10, #7, MUL VL]\n" + "addvl x10, x10, #16\n" + ".inst 0x80912760 // fmopa za0.s, p1/M, p1/M, z27.s, z17.s\n" + "ld1w { z27.s }, p1/Z, [x21]\n" + ".inst 0x80912741 // fmopa za1.s, p1/M, p1/M, z26.s, z17.s\n" + "ld1w { z26.s }, p1/Z, [x21, #1, MUL VL]\n" + ".inst 0x80912722 // fmopa za2.s, p1/M, p1/M, z25.s, z17.s\n" + "ld1w { z25.s }, p1/Z, [x21, #2, MUL VL]\n" + ".inst 0x80912703 // fmopa za3.s, p1/M, p1/M, z24.s, z17.s\n" + "ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n" + ".inst 0x809026e0 // fmopa za0.s, p1/M, p1/M, z23.s, z16.s\n" + "ld1w { z23.s }, p1/Z, [x20]\n" + ".inst 0x809026c1 // fmopa za1.s, p1/M, p1/M, z22.s, z16.s\n" + "ld1w { z22.s }, p1/Z, [x20, #1, MUL VL]\n" + ".inst 0x809026a2 // fmopa za2.s, p1/M, p1/M, z21.s, z16.s\n" + "ld1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n" + ".inst 0x80902683 // fmopa za3.s, p1/M, p1/M, z20.s, z16.s\n" + "ld1w { z20.s }, p1/Z, [x20, #3, MUL VL]\n" + "ld1w { z19.s }, p1/Z, [x11]\n" + "ld1w { z18.s }, p1/Z, [x11, #1, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [x11, #2, MUL VL]\n" + "ld1w { z16.s }, p1/Z, [x11, #3, MUL VL]\n" + "addvl 
x11, x11, #4\n" + "bgt 7b\n" + "8:" // K loop tail + ".inst 0x80932460 // fmopa za0.s, p1/M, p1/M, z3.s, z19.s\n" + ".inst 0x80932441 // fmopa za1.s, p1/M, p1/M, z2.s, z19.s\n" + ".inst 0x80932422 // fmopa za2.s, p1/M, p1/M, z1.s, z19.s\n" + ".inst 0x80932403 // fmopa za3.s, p1/M, p1/M, z0.s, z19.s\n" + ".inst 0x809227e0 // fmopa za0.s, p1/M, p1/M, z31.s, z18.s\n" + ".inst 0x809227c1 // fmopa za1.s, p1/M, p1/M, z30.s, z18.s\n" + ".inst 0x809227a2 // fmopa za2.s, p1/M, p1/M, z29.s, z18.s\n" + ".inst 0x80922783 // fmopa za3.s, p1/M, p1/M, z28.s, z18.s\n" + ".inst 0x80912760 // fmopa za0.s, p1/M, p1/M, z27.s, z17.s\n" + ".inst 0x80912741 // fmopa za1.s, p1/M, p1/M, z26.s, z17.s\n" + ".inst 0x80912722 // fmopa za2.s, p1/M, p1/M, z25.s, z17.s\n" + ".inst 0x80912703 // fmopa za3.s, p1/M, p1/M, z24.s, z17.s\n" + ".inst 0x809026e0 // fmopa za0.s, p1/M, p1/M, z23.s, z16.s\n" + ".inst 0x809026c1 // fmopa za1.s, p1/M, p1/M, z22.s, z16.s\n" + ".inst 0x809026a2 // fmopa za2.s, p1/M, p1/M, z21.s, z16.s\n" + ".inst 0x80902683 // fmopa za3.s, p1/M, p1/M, z20.s, z16.s\n" + "9:" // K oddments + "cbz x22, 11f\n" + "10:" // K oddments: Loop + "ld1w { z20.s }, p1/Z, [x10]\n" + "subs x22, x22, #0x1\n" + "ld1w { z19.s }, p1/Z, [x10, #1, MUL VL]\n" + "ld1w { z18.s }, p1/Z, [x10, #2, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [x10, #3, MUL VL]\n" + "addvl x10, x10, #4\n" + "ld1w { z16.s }, p1/Z, [x11]\n" + "addvl x11, x11, #1\n" + ".inst 0x80902680 // fmopa za0.s, p1/M, p1/M, z20.s, z16.s\n" + ".inst 0x80902661 // fmopa za1.s, p1/M, p1/M, z19.s, z16.s\n" + ".inst 0x80902642 // fmopa za2.s, p1/M, p1/M, z18.s, z16.s\n" + ".inst 0x80902623 // fmopa za3.s, p1/M, p1/M, z17.s, z16.s\n" + "bgt 10b\n" + "11:" // K oddments: End + "tbz x5, #1, 15f\n" + "tbz x5, #0, 13f\n" + "mov x12, #0x0\n" + "cntw x9\n" + "cntw x28\n" + "cntw x27, ALL, MUL #2\n" + "cntw x26, ALL, MUL #3\n" + "12:" // Store to partial result buffer: Store and refill: Loop + ".inst 0xe0bf04e0 // st1w { za0h.s[x12] }, p1/Z, [x7, XZR, LSL 
#2]\n" + ".inst 0xe09f04c0 // ld1w { za0h.s[x12] }, p1/Z, [x6, XZR, LSL #2]\n" + "addvl x25, x7, #4\n" + "addvl x24, x6, #4\n" + ".inst 0xe0bc04e1 // st1w { za0h.s[x12, #1] }, p1/Z, [x7, x28, LSL #2]\n" + ".inst 0xe09c04c1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x6, x28, LSL #2]\n" + "addvl x23, x7, #8\n" + "addvl x22, x6, #8\n" + ".inst 0xe0bb04e2 // st1w { za0h.s[x12, #2] }, p1/Z, [x7, x27, LSL #2]\n" + ".inst 0xe09b04c2 // ld1w { za0h.s[x12, #2] }, p1/Z, [x6, x27, LSL #2]\n" + "addvl x21, x7, #12\n" + "addvl x20, x6, #12\n" + ".inst 0xe0ba04e3 // st1w { za0h.s[x12, #3] }, p1/Z, [x7, x26, LSL #2]\n" + ".inst 0xe09a04c3 // ld1w { za0h.s[x12, #3] }, p1/Z, [x6, x26, LSL #2]\n" + "addvl x7, x7, #16\n" + "addvl x6, x6, #16\n" + ".inst 0xe0bf0724 // st1w { za1h.s[x12] }, p1/Z, [x25, XZR, LSL #2]\n" + ".inst 0xe09f0704 // ld1w { za1h.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n" + ".inst 0xe0bc0725 // st1w { za1h.s[x12, #1] }, p1/Z, [x25, x28, LSL #2]\n" + ".inst 0xe09c0705 // ld1w { za1h.s[x12, #1] }, p1/Z, [x24, x28, LSL #2]\n" + ".inst 0xe0bb0726 // st1w { za1h.s[x12, #2] }, p1/Z, [x25, x27, LSL #2]\n" + ".inst 0xe09b0706 // ld1w { za1h.s[x12, #2] }, p1/Z, [x24, x27, LSL #2]\n" + ".inst 0xe0ba0727 // st1w { za1h.s[x12, #3] }, p1/Z, [x25, x26, LSL #2]\n" + ".inst 0xe09a0707 // ld1w { za1h.s[x12, #3] }, p1/Z, [x24, x26, LSL #2]\n" + ".inst 0xe0bf06e8 // st1w { za2h.s[x12] }, p1/Z, [x23, XZR, LSL #2]\n" + ".inst 0xe09f06c8 // ld1w { za2h.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0bc06e9 // st1w { za2h.s[x12, #1] }, p1/Z, [x23, x28, LSL #2]\n" + ".inst 0xe09c06c9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n" + ".inst 0xe0bb06ea // st1w { za2h.s[x12, #2] }, p1/Z, [x23, x27, LSL #2]\n" + ".inst 0xe09b06ca // ld1w { za2h.s[x12, #2] }, p1/Z, [x22, x27, LSL #2]\n" + ".inst 0xe0ba06eb // st1w { za2h.s[x12, #3] }, p1/Z, [x23, x26, LSL #2]\n" + ".inst 0xe09a06cb // ld1w { za2h.s[x12, #3] }, p1/Z, [x22, x26, LSL #2]\n" + ".inst 0xe0bf06ac // st1w { za3h.s[x12] }, p1/Z, 
[x21, XZR, LSL #2]\n" + ".inst 0xe09f068c // ld1w { za3h.s[x12] }, p1/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0bc06ad // st1w { za3h.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n" + ".inst 0xe09c068d // ld1w { za3h.s[x12, #1] }, p1/Z, [x20, x28, LSL #2]\n" + ".inst 0xe0bb06ae // st1w { za3h.s[x12, #2] }, p1/Z, [x21, x27, LSL #2]\n" + ".inst 0xe09b068e // ld1w { za3h.s[x12, #2] }, p1/Z, [x20, x27, LSL #2]\n" + ".inst 0xe0ba06af // st1w { za3h.s[x12, #3] }, p1/Z, [x21, x26, LSL #2]\n" + ".inst 0xe09a068f // ld1w { za3h.s[x12, #3] }, p1/Z, [x20, x26, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x9\n" + "blt 12b\n" + "b 43f\n" + "13:" // Store to partial result buffer: Store only + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "14:" // Store to partial result buffer: Store only: Loop + ".inst 0xe0bf04e0 // st1w { za0h.s[x12] }, p1/Z, [x7, XZR, LSL #2]\n" + "addvl x22, x7, #4\n" + "addvl x21, x7, #8\n" + ".inst 0xe0b904e1 // st1w { za0h.s[x12, #1] }, p1/Z, [x7, x25, LSL #2]\n" + "addvl x20, x7, #12\n" + ".inst 0xe0b804e2 // st1w { za0h.s[x12, #2] }, p1/Z, [x7, x24, LSL #2]\n" + ".inst 0xe0b704e3 // st1w { za0h.s[x12, #3] }, p1/Z, [x7, x23, LSL #2]\n" + "addvl x7, x7, #16\n" + ".inst 0xe0bf06c4 // st1w { za1h.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n" + ".inst 0xe0b906c5 // st1w { za1h.s[x12, #1] }, p1/Z, [x22, x25, LSL #2]\n" + ".inst 0xe0b806c6 // st1w { za1h.s[x12, #2] }, p1/Z, [x22, x24, LSL #2]\n" + ".inst 0xe0b706c7 // st1w { za1h.s[x12, #3] }, p1/Z, [x22, x23, LSL #2]\n" + ".inst 0xe0bf06a8 // st1w { za2h.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe0b906a9 // st1w { za2h.s[x12, #1] }, p1/Z, [x21, x25, LSL #2]\n" + ".inst 0xe0b806aa // st1w { za2h.s[x12, #2] }, p1/Z, [x21, x24, LSL #2]\n" + ".inst 0xe0b706ab // st1w { za2h.s[x12, #3] }, p1/Z, [x21, x23, LSL #2]\n" + ".inst 0xe0bf068c // st1w { za3h.s[x12] }, p1/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe0b9068d // st1w { za3h.s[x12, #1] }, p1/Z, [x20, x25, LSL 
#2]\n" + ".inst 0xe0b8068e // st1w { za3h.s[x12, #2] }, p1/Z, [x20, x24, LSL #2]\n" + ".inst 0xe0b7068f // st1w { za3h.s[x12, #3] }, p1/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 14b\n" + "b 43f\n" + "15:" // Store to output array + "ldr x26, [%x[args], %[offsetof_C]]\n" + "sub x25, x15, x17\n" + "ldr x24, [%x[args], %[offsetof_ldcb]]\n" + "add x26, x26, x16, LSL #2\n" // C += n + "madd x26, x17, x24, x26\n" // C += m * ldc + "tbz x5, #2, 28f\n" + "cntw x23\n" + "mov x12, #0x0\n" + "cmp x25, x23\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 17f\n" + "16:" // Store to output array: Skip activation: Accumulator row 0 loop + ".inst 0xc0820413 // mova z19.s, p1/M, za0h.s[x12]\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820432 // mova z18.s, p1/M, za0h.s[x12, #1]\n" + ".inst 0xc0820451 // mova z17.s, p1/M, za0h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820470 // mova z16.s, p1/M, za0h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 16b\n" + "17:" // Store to output array: Skip activation: Accumulator row 0 oddments + "cbz x20, 18f\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820412 // mova z18.s, p1/M, za0h.s[x12]\n" + ".inst 0xc0820431 // mova z17.s, p1/M, za0h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820450 // mova z16.s, p1/M, za0h.s[x12, #2]\n" + "beq 18f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 18f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "18:" // Store to output array: Skip activation: Accumulator row 0 oddments: End + "subs x25, x25, x22\n" + "beq 28f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + 
"cbz x21, 20f\n" + "19:" // Store to output array: Skip activation: Accumulator row 1 loop + ".inst 0xc0820493 // mova z19.s, p1/M, za1h.s[x12]\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08204b2 // mova z18.s, p1/M, za1h.s[x12, #1]\n" + ".inst 0xc08204d1 // mova z17.s, p1/M, za1h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08204f0 // mova z16.s, p1/M, za1h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 19b\n" + "20:" // Store to output array: Skip activation: Accumulator row 1 oddments + "cbz x20, 21f\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820492 // mova z18.s, p1/M, za1h.s[x12]\n" + ".inst 0xc08204b1 // mova z17.s, p1/M, za1h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08204d0 // mova z16.s, p1/M, za1h.s[x12, #2]\n" + "beq 21f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 21f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "21:" // Store to output array: Skip activation: Accumulator row 1 oddments: End + "subs x25, x25, x22\n" + "beq 28f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 23f\n" + "22:" // Store to output array: Skip activation: Accumulator row 2 loop + ".inst 0xc0820513 // mova z19.s, p1/M, za2h.s[x12]\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820532 // mova z18.s, p1/M, za2h.s[x12, #1]\n" + ".inst 0xc0820551 // mova z17.s, p1/M, za2h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820570 // mova z16.s, p1/M, za2h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 22b\n" + 
"23:" // Store to output array: Skip activation: Accumulator row 2 oddments + "cbz x20, 24f\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820512 // mova z18.s, p1/M, za2h.s[x12]\n" + ".inst 0xc0820531 // mova z17.s, p1/M, za2h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc0820550 // mova z16.s, p1/M, za2h.s[x12, #2]\n" + "beq 24f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 24f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "24:" // Store to output array: Skip activation: Accumulator row 2 oddments: End + "subs x25, x25, x22\n" + "beq 28f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 26f\n" + "25:" // Store to output array: Skip activation: Accumulator row 3 loop + ".inst 0xc0820593 // mova z19.s, p1/M, za3h.s[x12]\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08205b2 // mova z18.s, p1/M, za3h.s[x12, #1]\n" + ".inst 0xc08205d1 // mova z17.s, p1/M, za3h.s[x12, #2]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08205f0 // mova z16.s, p1/M, za3h.s[x12, #3]\n" + "add x12, x12, #0x4\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "cmp x12, x21, LSL #2\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 25b\n" + "26:" // Store to output array: Skip activation: Accumulator row 3 oddments + "cbz x20, 27f\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820592 // mova z18.s, p1/M, za3h.s[x12]\n" + ".inst 0xc08205b1 // mova z17.s, p1/M, za3h.s[x12, #1]\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + ".inst 0xc08205d0 // mova z16.s, p1/M, za3h.s[x12, #2]\n" + "beq 27f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 27f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "27:" // Store to output array: Skip activation: Accumulator row 3 oddments: End + "subs x25, x25, 
x22\n" + "beq 28f\n" + "b 41f\n" + "28:" // Store to output array: Skip activation: End + "cntw x23\n" + "ld1rw { z21.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n" + "mov x12, #0x0\n" + "cmp x25, x23\n" + "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 30f\n" + "29:" // Store to output array: Accumulator row 0 loop + ".inst 0xc0820413 // mova z19.s, p1/M, za0h.s[x12]\n" + ".inst 0xc0820432 // mova z18.s, p1/M, za0h.s[x12, #1]\n" + "fmin z19.s, p1/M, z19.s, z20.s\n" + ".inst 0xc0820451 // mova z17.s, p1/M, za0h.s[x12, #2]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc0820470 // mova z16.s, p1/M, za0h.s[x12, #3]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "cmp x12, x21, LSL #2\n" + "fmax z19.s, p1/M, z19.s, z21.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 29b\n" + "30:" // Store to output array: Accumulator row 0 oddments + "cbz x20, 31f\n" + ".inst 0xc0820412 // mova z18.s, p1/M, za0h.s[x12]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820431 // mova z17.s, p1/M, za0h.s[x12, #1]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc0820450 // mova z16.s, p1/M, za0h.s[x12, #2]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 31f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 31f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "31:" // Store 
to output array: Accumulator row 0 oddments: End + "subs x25, x25, x22\n" + "beq 41f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 33f\n" + "32:" // Store to output array: Accumulator row 1 loop + ".inst 0xc0820493 // mova z19.s, p1/M, za1h.s[x12]\n" + ".inst 0xc08204b2 // mova z18.s, p1/M, za1h.s[x12, #1]\n" + "fmin z19.s, p1/M, z19.s, z20.s\n" + ".inst 0xc08204d1 // mova z17.s, p1/M, za1h.s[x12, #2]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc08204f0 // mova z16.s, p1/M, za1h.s[x12, #3]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "cmp x12, x21, LSL #2\n" + "fmax z19.s, p1/M, z19.s, z21.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 32b\n" + "33:" // Store to output array: Accumulator row 1 oddments + "cbz x20, 34f\n" + ".inst 0xc0820492 // mova z18.s, p1/M, za1h.s[x12]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc08204b1 // mova z17.s, p1/M, za1h.s[x12, #1]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc08204d0 // mova z16.s, p1/M, za1h.s[x12, #2]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 34f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 34f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "34:" // Store to output array: Accumulator row 1 oddments: End + "subs x25, x25, x22\n" + "beq 41f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x22, x25, x23, 
LT\n" + "lsr x21, x22, #0x2\n" + "and x20, x22, #0x3\n" + "cbz x21, 36f\n" + "35:" // Store to output array: Accumulator row 2 loop + ".inst 0xc0820513 // mova z19.s, p1/M, za2h.s[x12]\n" + ".inst 0xc0820532 // mova z18.s, p1/M, za2h.s[x12, #1]\n" + "fmin z19.s, p1/M, z19.s, z20.s\n" + ".inst 0xc0820551 // mova z17.s, p1/M, za2h.s[x12, #2]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc0820570 // mova z16.s, p1/M, za2h.s[x12, #3]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "cmp x12, x21, LSL #2\n" + "fmax z19.s, p1/M, z19.s, z21.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 35b\n" + "36:" // Store to output array: Accumulator row 2 oddments + "cbz x20, 37f\n" + ".inst 0xc0820512 // mova z18.s, p1/M, za2h.s[x12]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc0820531 // mova z17.s, p1/M, za2h.s[x12, #1]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc0820550 // mova z16.s, p1/M, za2h.s[x12, #2]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 37f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 37f\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "37:" // Store to output array: Accumulator row 2 oddments: End + "subs x25, x25, x22\n" + "beq 41f\n" + "cmp x25, x23\n" + "mov x12, #0x0\n" + "csel x20, x25, x23, LT\n" + "lsr x21, x20, #0x2\n" + "and x20, x20, #0x3\n" + "cbz x21, 39f\n" + "38:" // Store to output array: Accumulator row 3 loop + ".inst 0xc0820593 
// mova z19.s, p1/M, za3h.s[x12]\n" + ".inst 0xc08205b2 // mova z18.s, p1/M, za3h.s[x12, #1]\n" + "fmin z19.s, p1/M, z19.s, z20.s\n" + ".inst 0xc08205d1 // mova z17.s, p1/M, za3h.s[x12, #2]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc08205f0 // mova z16.s, p1/M, za3h.s[x12, #3]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "add x12, x12, #0x4\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "cmp x12, x21, LSL #2\n" + "fmax z19.s, p1/M, z19.s, z21.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z19.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "st1w { z16.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "blt 38b\n" + "39:" // Store to output array: Accumulator row 3 oddments + "cbz x20, 40f\n" + ".inst 0xc0820592 // mova z18.s, p1/M, za3h.s[x12]\n" + "subs x20, x20, #0x1\n" + ".inst 0xc08205b1 // mova z17.s, p1/M, za3h.s[x12, #1]\n" + "fmin z18.s, p1/M, z18.s, z20.s\n" + ".inst 0xc08205d0 // mova z16.s, p1/M, za3h.s[x12, #2]\n" + "fmin z17.s, p1/M, z17.s, z20.s\n" + "fmin z16.s, p1/M, z16.s, z20.s\n" + "fmax z18.s, p1/M, z18.s, z21.s\n" + "fmax z17.s, p1/M, z17.s, z21.s\n" + "fmax z16.s, p1/M, z16.s, z21.s\n" + "st1w { z18.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 40f\n" + "subs x20, x20, #0x1\n" + "st1w { z17.s }, p0, [x26]\n" + "add x26, x26, x24\n" + "beq 40f\n" + "st1w { z16.s }, p0, [x26]\n" + "40:" // Store to output array: Accumulator row 3 oddments: End + "41:" // Store to output array: End + "tbz x5, #0, 43f\n" + "mov x12, #0x0\n" + "cntw x26\n" + "cntw x25\n" + "cntw x24, ALL, MUL #2\n" + "cntw x23, ALL, MUL #3\n" + "42:" // Store to output array: Refill accumulators: Loop + "addvl x22, x6, #4\n" + "addvl x21, x6, #8\n" + ".inst 0xe09f04c0 // ld1w { za0h.s[x12] }, p1/Z, [x6, XZR, LSL #2]\n" + "addvl x20, x6, #12\n" + ".inst 0xe09f06c4 // ld1w { za1h.s[x12] }, p1/Z, [x22, 
XZR, LSL #2]\n" + ".inst 0xe09f06a8 // ld1w { za2h.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n" + ".inst 0xe09f068c // ld1w { za3h.s[x12] }, p1/Z, [x20, XZR, LSL #2]\n" + ".inst 0xe09904c1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x6, x25, LSL #2]\n" + ".inst 0xe09906c5 // ld1w { za1h.s[x12, #1] }, p1/Z, [x22, x25, LSL #2]\n" + ".inst 0xe09906a9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x21, x25, LSL #2]\n" + ".inst 0xe099068d // ld1w { za3h.s[x12, #1] }, p1/Z, [x20, x25, LSL #2]\n" + ".inst 0xe09804c2 // ld1w { za0h.s[x12, #2] }, p1/Z, [x6, x24, LSL #2]\n" + ".inst 0xe09806c6 // ld1w { za1h.s[x12, #2] }, p1/Z, [x22, x24, LSL #2]\n" + ".inst 0xe09806aa // ld1w { za2h.s[x12, #2] }, p1/Z, [x21, x24, LSL #2]\n" + ".inst 0xe098068e // ld1w { za3h.s[x12, #2] }, p1/Z, [x20, x24, LSL #2]\n" + ".inst 0xe09704c3 // ld1w { za0h.s[x12, #3] }, p1/Z, [x6, x23, LSL #2]\n" + "addvl x6, x6, #16\n" + ".inst 0xe09706c7 // ld1w { za1h.s[x12, #3] }, p1/Z, [x22, x23, LSL #2]\n" + ".inst 0xe09706ab // ld1w { za2h.s[x12, #3] }, p1/Z, [x21, x23, LSL #2]\n" + ".inst 0xe097068f // ld1w { za3h.s[x12, #3] }, p1/Z, [x20, x23, LSL #2]\n" + "add x12, x12, #0x4\n" + "cmp x12, x26\n" + "blt 42b\n" + "43:" // End block + "incw x16\n" + "cmp x16, x14\n" + "blt 4b\n" + "incw x17, ALL, MUL #4\n" + "mov x16, #0x0\n" + "cmp x17, x15\n" + "mov x13, x10\n" + "blt 3b\n" + ".inst 0xd503467f // SMSTOP\n" + : + : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_ldcb] "I" (offsetof(KernelArgs, 
ldcb)) + : "cc", "memory", "p0", "p1", "p10", "p11", "p12", "p13", "p14", "p15", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x5", "x6", "x7", "x8", "x9", "z0", "z1", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z2", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z3", "z30", "z31", "z4", "z5", "z6", "z7", "z8", "z9" + ); +} + +} // namespace arm_gemm + +#endif // ARM_COMPUTE_ENABLE_SME2