@@ -0,0 +1,62 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfadd_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfadd_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfadd_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfadd_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x bfloat> %a_z,
                                                                    <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfadd_u(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfadd_u:
; CHECK: // %bb.0:
; CHECK-NEXT: bfadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfadd_u_ptrue(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfadd_u_ptrue:
; CHECK: // %bb.0:
; CHECK-NEXT: bfadd z0.h, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfadd_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfadd_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfclamp(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfclamp:
; CHECK: // %bb.0:
; CHECK-NEXT: bfclamp z0.h, z1.h, z2.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fclamp.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fclamp.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfmax_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmax_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmax(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmax:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmax_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmax_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x bfloat> %a_z,
                                                                    <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmax_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmax_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmax_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmax_u:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmax_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmax_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmax.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfmaxnm_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmaxnm_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmaxnm(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmaxnm:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmaxnm_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmaxnm_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmaxnm_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmaxnm_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmaxnm_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmaxnm_u:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmaxnm_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmaxnm_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                        <vscale x 8 x bfloat> %a_z,
                                                                        <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfmin_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmin_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmin(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmin:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmin_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmin_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x bfloat> %a_z,
                                                                    <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmin_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmin_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmin_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmin_u:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmin_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmin_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmin.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfminnm_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfminnm_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfminnm(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfminnm:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfminnm_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfminnm_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfminnm_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfminnm_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfminnm_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfminnm_u:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfminnm_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfminnm_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                        <vscale x 8 x bfloat> %a_z,
                                                                        <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfmla_m(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_m:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmla_x(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_x:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmla_z(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_z:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z3.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z3.h
; CHECK-NEXT: bfmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a_z, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
@@ -0,0 +1,31 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfmla_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_lane_idx1:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmla z0.h, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 1)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmla_lane_idx3(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_lane_idx3:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmla z0.h, z1.h, z2.h[3]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 3)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmla_lane_idx7(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmla_lane_idx7:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmla z0.h, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 7)
  ret <vscale x 8 x bfloat> %res
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmla.lane.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
@@ -1,43 +1,36 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfmls_m(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_m:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmls_x(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_x:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmls_z(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_z:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z3.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z3.h
; CHECK-NEXT: bfmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a_z, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
@@ -0,0 +1,31 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfmls_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_lane_idx1:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmls z0.h, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 1)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmls_lane_idx3(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_lane_idx3:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmls z0.h, z1.h, z2.h[3]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 3)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmls_lane_idx7(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
; CHECK-LABEL: bfmls_lane_idx7:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmls z0.h, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.lane.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 7)
  ret <vscale x 8 x bfloat> %res
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmls.lane.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
@@ -0,0 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 4 x float> @bfmlslb_f32(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: bfmlslb_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmlslb z0.s, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlslb(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @bfmlslt_f32(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: bfmlslt_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmlslt z0.s, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlslt(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @bfmlslb_lane_f32(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: bfmlslb_lane_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmlslb z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlslb.lane(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 7)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @bfmlslt_lane_f32(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) {
; CHECK-LABEL: bfmlslt_lane_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmlslt z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlslt.lane(<vscale x 4 x float> %zda, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm, i32 7)
  ret <vscale x 4 x float> %out
}

declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlslb(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlslt(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlslb.lane(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlslt.lane(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
@@ -0,0 +1,62 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfmul_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmul_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmul_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x bfloat> %a_z,
                                                                    <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmul_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmul_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmul_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfmul_u:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfmul_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
@@ -0,0 +1,37 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x bfloat> @bfmul_lane_idx1(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_lane_idx1:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, z0.h, z1.h[1]
; CHECK-NEXT: ret
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.lane.nxv8bf16(<vscale x 8 x bfloat> %a,
                                                                         <vscale x 8 x bfloat> %b,
                                                                         i32 1)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmul_lane_idx3(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_lane_idx3:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, z0.h, z1.h[3]
; CHECK-NEXT: ret
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.lane.nxv8bf16(<vscale x 8 x bfloat> %a,
                                                                         <vscale x 8 x bfloat> %b,
                                                                         i32 3)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfmul_lane_idx7(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_lane_idx7:
; CHECK: // %bb.0:
; CHECK-NEXT: bfmul z0.h, z0.h, z1.h[7]
; CHECK-NEXT: ret
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.lane.nxv8bf16(<vscale x 8 x bfloat> %a,
                                                                         <vscale x 8 x bfloat> %b,
                                                                         i32 7)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.lane.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
@@ -0,0 +1,62 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN: | FileCheck %s

define <vscale x 8 x bfloat> @bfsub_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfsub_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfsub_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfsub_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: bfsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x bfloat> %a_z,
                                                                    <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

define <vscale x 8 x bfloat> @bfsub_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfsub_u_pred:
; CHECK: // %bb.0:
; CHECK-NEXT: bfsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfsub_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
; CHECK-LABEL: bfsub_u:
; CHECK: // %bb.0:
; CHECK-NEXT: bfsub z0.h, z0.h, z1.h
; CHECK-NEXT: ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @bfsub_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfsub_u_zeroing:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z2.h, #0 // =0x0
; CHECK-NEXT: sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT: bfsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %pg,
                                                                      <vscale x 8 x bfloat> %a_z,
                                                                      <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)