[Clang][AArch64] Implement widening FMMLA intrinsics #165282
@@ -1196,6 +1196,18 @@ def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_fmmla

```tablegen
let SVETargetGuard = "f64mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd", "d", MergeNone, "aarch64_sve_fmmla">;
}

let SVETargetGuard = "sve-f16f32mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F32_F16 : SInst<"svmmla[_f32_f16]", "MMdd", "h", MergeNone, "aarch64_sve_fmmla_f16f32", [IsOverloadNone]>;
}

let SVETargetGuard = "sve2,f8f32mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F32_MF8 : SInst<"svmmla[_f32_mf8]", "MM~~>", "m", MergeNone, "aarch64_sve_fmmla_mf8f32", [IsOverloadNone]>;
}

let SVETargetGuard = "sve2,f8f16mm", SMETargetGuard = InvalidMode in {
def SVMLLA_F16_MF8 : SInst<"svmmla[_f16_mf8]", "OO~~>", "m", MergeNone, "aarch64_sve_fmmla_mf8f16", [IsOverloadNone]>;
}

def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_trn1q">;
def SVTRN2Q : SInst<"svtrn2q[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_trn2q">;
def SVUZP1Q : SInst<"svuzp1q[_{d}]", "ddd", "csilUcUsUiUlhfdb", MergeNone, "aarch64_sve_uzp1q">;
```
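For reference, a minimal usage sketch of the three new builtins in the non-overloaded ACLE spellings that the tests below exercise (the wrapper function names here are illustrative only; the FP8 forms carry an extra fpm_t operand that configures FPMR):

```c
#include <arm_sve.h>

// Widening f16 -> f32 matrix multiply-accumulate (sve-f16f32mm).
svfloat32_t widen_f16_to_f32(svfloat32_t acc, svfloat16_t a, svfloat16_t b) {
  return svmmla_f32_f16(acc, a, b);
}

// FP8 widening forms (sve2 + f8f32mm / f8f16mm); fpm selects the FP8 formats via FPMR.
svfloat32_t widen_mf8_to_f32(svfloat32_t acc, svmfloat8_t a, svmfloat8_t b, fpm_t fpm) {
  return svmmla_f32_mf8_fpm(acc, a, b, fpm);
}

svfloat16_t widen_mf8_to_f16(svfloat16_t acc, svmfloat8_t a, svmfloat8_t b, fpm_t fpm) {
  return svmmla_f16_mf8_fpm(acc, a, b, fpm);
}
```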
@@ -0,0 +1,33 @@

```c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve-f16f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve-f16f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve-f16f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve-f16f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve-f16f32mm -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
```

Contributor, on the SVE_ACLE_FUNC macro: I think you can drop the last two arguments from the macro, because they're not used by the test in this file.
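For illustration, the simplification the reviewer points at might look like this (a hypothetical sketch, since the suggested-change text itself is not shown in the thread; this file never passes anything in the last two argument slots):

```c
#ifdef SVE_OVERLOADED_FORMS
#define SVE_ACLE_FUNC(A1, A2_UNUSED) A1
#else
#define SVE_ACLE_FUNC(A1, A2) A1##A2
#endif

// Call sites would then shrink accordingly:
//   return SVE_ACLE_FUNC(svmmla, _f32_f16)(acc, a, b);
```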
|
|
```c
// CHECK-LABEL: define dso_local <vscale x 4 x float> @test_f32f16(
// CHECK-SAME: <vscale x 4 x float> [[ACC:%.*]], <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.f16f32(<vscale x 4 x float> [[ACC]], <vscale x 8 x half> [[A]], <vscale x 8 x half> [[B]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: define dso_local <vscale x 4 x float> @_Z11test_f32f16u13__SVFloat32_tu13__SVFloat16_tS0_(
// CPP-CHECK-SAME: <vscale x 4 x float> [[ACC:%.*]], <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CPP-CHECK-NEXT: [[ENTRY:.*:]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.f16f32(<vscale x 4 x float> [[ACC]], <vscale x 8 x half> [[A]], <vscale x 8 x half> [[B]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_f32f16(svfloat32_t acc, svfloat16_t a, svfloat16_t b) {
  return SVE_ACLE_FUNC(svmmla, _f32_f16, , )(acc, a, b);
}
```
@@ -0,0 +1,35 @@

```c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f16mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f16mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f16mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f16mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f16mm -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif

// CHECK-LABEL: define dso_local <vscale x 8 x half> @test_f16mf8(
// CHECK-SAME: <vscale x 8 x half> [[ACC:%.*]], <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmmla.mf8f16(<vscale x 8 x half> [[ACC]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: define dso_local <vscale x 8 x half> @_Z11test_f16mf8u13__SVFloat16_tu13__SVMfloat8_tS0_m(
// CPP-CHECK-SAME: <vscale x 8 x half> [[ACC:%.*]], <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
// CPP-CHECK-NEXT: [[ENTRY:.*:]]
// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmmla.mf8f16(<vscale x 8 x half> [[ACC]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_f16mf8(svfloat16_t acc, svmfloat8_t a, svmfloat8_t b, fpm_t fpmr) {
  return SVE_ACLE_FUNC(svmmla, _f16_mf8, _fpm, )(acc, a, b, fpmr);
}
```
@@ -0,0 +1,36 @@

```c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6

// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f32mm -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +f8f32mm -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif

// CHECK-LABEL: define dso_local <vscale x 4 x float> @test_f32mf8(
// CHECK-SAME: <vscale x 4 x float> [[ACC:%.*]], <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float> [[ACC]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: define dso_local <vscale x 4 x float> @_Z11test_f32mf8u13__SVFloat32_tu13__SVMfloat8_tS0_m(
// CPP-CHECK-SAME: <vscale x 4 x float> [[ACC:%.*]], <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
// CPP-CHECK-NEXT: [[ENTRY:.*:]]
// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float> [[ACC]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_f32mf8(svfloat32_t acc, svmfloat8_t a, svmfloat8_t b, fpm_t fpmr) {
  return SVE_ACLE_FUNC(svmmla, _f32_mf8, _fpm, )(acc, a, b, fpmr);
}
```
@@ -2807,6 +2807,20 @@ def int_aarch64_sve_sudot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

```tablegen
//
def int_aarch64_sve_fmmla : AdvSIMD_3VectorArg_Intrinsic;

def int_aarch64_sve_fmmla_f16f32
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                            [ llvm_nxv4f32_ty, llvm_nxv8f16_ty, llvm_nxv8f16_ty ],
                            [IntrNoMem]>;

def int_aarch64_sve_fmmla_mf8f32
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                            [ llvm_nxv4f32_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty ],
                            [IntrNoMem]>;

def int_aarch64_sve_fmmla_mf8f16
    : DefaultAttrsIntrinsic<[llvm_nxv8f16_ty],
                            [ llvm_nxv8f16_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty ],
                            [IntrNoMem]>;
//
// SVE ACLE: 7.2. BFloat16 extensions
//
```

Contributor, on the new intrinsic definitions: In my opinion, there is no need to add these new intrinsics. I think it might be better to make int_aarch64_sve_fmmla overloaded on two types and use that here. This would also make it more future-proof, allowing it to be used for other widening intrinsics in case they appear. But I understand this will require rewriting tests, so if it becomes too much effort to fix those, we can create a new fmmla_widen intrinsic to cover these cases.
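A rough sketch of the reviewer's alternative, with a single fmmla intrinsic overloaded on both the accumulator/result type and the source vector type (this is an assumption about how the overload could be expressed, not part of the patch; Clang CodeGen and the existing tests would need matching updates):

```tablegen
def int_aarch64_sve_fmmla
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                            [IntrNoMem]>;
```

With that shape, the f16-to-f32 form would be emitted as something like llvm.aarch64.sve.fmmla.nxv4f32.nxv8f16, and future widening variants could reuse the same definition.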
@@ -3684,7 +3684,7 @@ let Predicates = [HasSVE, HasMatMulFP32] in {

```diff
 } // End HasSVE, HasMatMulFP32

 let Predicates = [HasSVE_F16F32MM] in {
-  def FMLLA_ZZZ_HtoS : sve_fp_matrix_mla<0b001, "fmmla", ZPR32, ZPR16>;
+  defm FMLLA_ZZZ_HtoS : sve_fp_matrix_mla<0b001, "fmmla", ZPR32, ZPR16, int_aarch64_sve_fmmla_f16f32, nxv4f32, nxv8f16>;
 } // End HasSVE_F16F32MM

 let Predicates = [HasSVE, HasMatMulFP64] in {
```

@@ -4745,10 +4745,14 @@ defm FMLALLTT_ZZZ : sve2_fp8_mla<0b011, ZPR32, "fmlalltt", nxv4f32, int_aarch64_

```diff
 let Predicates = [HasSVE2, HasF8F32MM] in {
   def FMMLA_ZZZ_BtoS : sve2_fp8_mmla<0b0, ZPR32, "fmmla">;
+  def : Pat<(nxv4f32 (int_aarch64_sve_fmmla_mf8f32 nxv4f32:$acc, nxv16i8:$zn, nxv16i8:$zm)),
+            (FMMLA_ZZZ_BtoS $acc, $zn, $zm)>;
 }

 let Predicates = [HasSVE2, HasF8F16MM] in {
   def FMMLA_ZZZ_BtoH : sve2_fp8_mmla<0b1, ZPR16, "fmmla">;
+  def : Pat<(nxv8f16 (int_aarch64_sve_fmmla_mf8f16 nxv8f16:$acc, nxv16i8:$zn, nxv16i8:$zm)),
+            (FMMLA_ZZZ_BtoH $acc, $zn, $zm)>;
 }

 let Predicates = [HasSSVE_FP8DOT2] in {
```

Contributor, on the new patterns: It will probably be better to copy what was done for sve_fp_matrix_mla and create a multiclass for it, where you bundle the pattern and the instruction definition together.
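A sketch of what the suggested multiclass might look like, loosely mirroring how sve_fp_matrix_mla pairs the instruction with its selection pattern (the multiclass name and parameter list here are illustrative, not something in the patch):

```tablegen
multiclass sve2_fp8_mmla_mm<bits<1> opc, ZPRRegOp zprty, string asm,
                            SDPatternOperator op, ValueType vtd, ValueType vts> {
  // Instruction definition and its ISel pattern bundled together.
  def NAME : sve2_fp8_mmla<opc, zprty, asm>;
  def : Pat<(vtd (op vtd:$acc, vts:$zn, vts:$zm)),
            (!cast<Instruction>(NAME) $acc, $zn, $zm)>;
}
```

Each block above would then reduce to a single defm, for example: defm FMMLA_ZZZ_BtoS : sve2_fp8_mmla_mm<0b0, ZPR32, "fmmla", int_aarch64_sve_fmmla_mf8f32, nxv4f32, nxv16i8>;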
@@ -0,0 +1,32 @@

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve-f16f32mm < %s | FileCheck %s --check-prefixes=CHECK

define <vscale x 4 x float> @_Z1tu13__SVFloat32_tu13__SVFloat16_tS0_(<vscale x 4 x float> %acc, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: _Z1tu13__SVFloat32_tu13__SVFloat16_tS0_:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z0, [sp, #2, mul vl]
; CHECK-NEXT: fmmla z0.s, z1.h, z2.h
; CHECK-NEXT: str z1, [sp, #1, mul vl]
; CHECK-NEXT: str z2, [sp]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %acc.addr = alloca <vscale x 4 x float>, align 16
  %a.addr = alloca <vscale x 8 x half>, align 16
  %b.addr = alloca <vscale x 8 x half>, align 16
  store <vscale x 4 x float> %acc, ptr %acc.addr, align 16
  store <vscale x 8 x half> %a, ptr %a.addr, align 16
  store <vscale x 8 x half> %b, ptr %b.addr, align 16
  %0 = load <vscale x 4 x float>, ptr %acc.addr, align 16
  %1 = load <vscale x 8 x half>, ptr %a.addr, align 16
  %2 = load <vscale x 8 x half>, ptr %b.addr, align 16
  %3 = call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.f16f32(<vscale x 4 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2)
  ret <vscale x 4 x float> %3
}

declare <vscale x 4 x float> @llvm.aarch64.sve.fmmla.f16f32(<vscale x 4 x float>, <vscale x 8 x half>, <vscale x 8 x half>)
```

Contributor, on the body of the test function: There is too much unrelated code here. The backend tests should only do the intrinsic call.

Contributor, on the trailing declare: This is not necessary anymore.
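For illustration, a stripped-down test along the lines the reviewer asks for might look like this (a hand-written sketch, not update_llc_test_checks.py output; the CHECK lines reflect the expected selection, with z0 serving as both the accumulator input and the destructive destination):

```llvm
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve-f16f32mm < %s | FileCheck %s

define <vscale x 4 x float> @fmmla_f16f32(<vscale x 4 x float> %acc, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmmla_f16f32:
; CHECK: fmmla z0.s, z1.h, z2.h
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.f16f32(<vscale x 4 x float> %acc, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 4 x float> %r
}
```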
@@ -0,0 +1,39 @@

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sve2,+f8f16mm < %s | FileCheck %s --check-prefixes=CHECK

define <vscale x 8 x half> @_Z5test2u13__SVFloat16_tu13__SVMfloat8_tS0_m(<vscale x 8 x half> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i64 %fpmr) {
; CHECK-LABEL: _Z5test2u13__SVFloat16_tu13__SVMfloat8_tS0_m:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl x8, sp, #3
; CHECK-NEXT: str z1, [sp, #1, mul vl]
; CHECK-NEXT: str z0, [sp, #2, mul vl]
; CHECK-NEXT: str z2, [sp]
; CHECK-NEXT: str x0, [x8, #8]
; CHECK-NEXT: msr FPMR, x0
; CHECK-NEXT: fmmla z0.h, z1.b, z2.b
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %acc.addr = alloca <vscale x 8 x half>, align 16
  %a.addr = alloca <vscale x 16 x i8>, align 16
  %b.addr = alloca <vscale x 16 x i8>, align 16
  %fpmr.addr = alloca i64, align 8
  store <vscale x 8 x half> %acc, ptr %acc.addr, align 16
  store <vscale x 16 x i8> %a, ptr %a.addr, align 16
  store <vscale x 16 x i8> %b, ptr %b.addr, align 16
  store i64 %fpmr, ptr %fpmr.addr, align 8
  %0 = load <vscale x 8 x half>, ptr %acc.addr, align 16
  %1 = load <vscale x 16 x i8>, ptr %a.addr, align 16
  %2 = load <vscale x 16 x i8>, ptr %b.addr, align 16
  %3 = load i64, ptr %fpmr.addr, align 8
  call void @llvm.aarch64.set.fpmr(i64 %3)
  %4 = call <vscale x 8 x half> @llvm.aarch64.sve.fmmla.mf8f16(<vscale x 8 x half> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2)
  ret <vscale x 8 x half> %4
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fmmla.mf8f16(<vscale x 8 x half>, <vscale x 16 x i8>, <vscale x 16 x i8>)
```
@@ -0,0 +1,41 @@

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sve2,+f8f32mm < %s | FileCheck %s --check-prefixes=CHECK

define dso_local <vscale x 4 x float> @_Z5t_varu13__SVFloat32_tu13__SVMfloat8_tS0_m(<vscale x 4 x float> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i64 noundef %fpmr) #0 {
; CHECK-LABEL: _Z5t_varu13__SVFloat32_tu13__SVMfloat8_tS0_m:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl x8, sp, #3
; CHECK-NEXT: str z1, [sp, #1, mul vl]
; CHECK-NEXT: str z0, [sp, #2, mul vl]
; CHECK-NEXT: str z2, [sp]
; CHECK-NEXT: str x0, [x8, #8]
; CHECK-NEXT: msr FPMR, x0
; CHECK-NEXT: fmmla z0.s, z1.b, z2.b
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %acc.addr = alloca <vscale x 4 x float>, align 16
  %a.addr = alloca <vscale x 16 x i8>, align 16
  %b.addr = alloca <vscale x 16 x i8>, align 16
  %fpmr.addr = alloca i64, align 8
  store <vscale x 4 x float> %acc, ptr %acc.addr, align 16
  store <vscale x 16 x i8> %a, ptr %a.addr, align 16
  store <vscale x 16 x i8> %b, ptr %b.addr, align 16
  store i64 %fpmr, ptr %fpmr.addr, align 8
  %0 = load <vscale x 4 x float>, ptr %acc.addr, align 16
  %1 = load <vscale x 16 x i8>, ptr %a.addr, align 16
  %2 = load <vscale x 16 x i8>, ptr %b.addr, align 16
  %3 = load i64, ptr %fpmr.addr, align 8
  call void @llvm.aarch64.set.fpmr(i64 %3)
  %4 = call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2)
  ret <vscale x 4 x float> %4
}

declare void @llvm.aarch64.set.fpmr(i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float>, <vscale x 16 x i8>, <vscale x 16 x i8>)
```
Contributor: Nit, but I think this would be better: