[AArch64][SVE] Change pointer type of nontemporal load/store intrinsics
Summary:
This fixes a discrepancy between the non-temporal load/store
intrinsics and other SVE load intrinsics (such as nf/ff), so
that Clang can use the same code to generate these intrinsics.

Reviewers: andwar, kmclaughlin, rengolin, efriedma

Reviewed By: efriedma

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76237
sdesmalen-arm committed Mar 18, 2020
1 parent 940ba14 commit 4788ca4
Showing 6 changed files with 128 additions and 126 deletions.
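For illustration, taken from the test updates below: the non-temporal intrinsics now take a pointer to the element type instead of a pointer to the scalable vector type, matching the ldff1/ldnf1 intrinsics.

; Before this change, e.g. for the 16 x i8 variants:
declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>*)
declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)

; After this change:
declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)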
13 changes: 3 additions & 10 deletions llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -778,12 +778,6 @@ def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".

class AdvSIMD_1Vec_PredLoad_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMPointerTo<0>],
[IntrReadMem, IntrArgMemOnly]>;

class AdvSIMD_1Vec_PredFaultingLoad_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMPointerToElt<0>],
@@ -793,7 +787,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: Intrinsic<[],
[llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMPointerTo<0>],
LLVMPointerToElt<0>],
[IntrArgMemOnly, NoCapture<2>]>;

class AdvSIMD_SVE_Index_Intrinsic
@@ -1289,9 +1283,8 @@ class SVE_gather_prf_vector_base_scalar_offset
//

def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;

def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;

//
// Stores
4 changes: 2 additions & 2 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8981,7 +8981,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_sve_ldnt1: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(PtrTy->getElementType());
Info.memVT = MVT::getVT(I.getType());
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
@@ -8991,7 +8991,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_sve_stnt1: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(2)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(PtrTy->getElementType());
Info.memVT = MVT::getVT(I.getOperand(0)->getType());
Info.ptrVal = I.getArgOperand(2);
Info.offset = 0;
Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
42 changes: 21 additions & 21 deletions llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -4,85 +4,85 @@
; LDNT1B
;

define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: ldnt1b_i8:
; CHECK: ldnt1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pred,
<vscale x 16 x i8>* %addr)
i8* %addr)
ret <vscale x 16 x i8> %res
}

;
; LDNT1H
;

define <vscale x 8 x i16> @ldnt1h_i16(<vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
define <vscale x 8 x i16> @ldnt1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: ldnt1h_i16:
; CHECK: ldnt1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %pred,
<vscale x 8 x i16>* %addr)
i16* %addr)
ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ldnt1h_f16(<vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
define <vscale x 8 x half> @ldnt1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: ldnt1h_f16:
; CHECK: ldnt1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %pred,
<vscale x 8 x half>* %addr)
half* %addr)
ret <vscale x 8 x half> %res
}

;
; LDNT1W
;

define <vscale x 4 x i32> @ldnt1w_i32(<vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
define <vscale x 4 x i32> @ldnt1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: ldnt1w_i32:
; CHECK: ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %pred,
<vscale x 4 x i32>* %addr)
i32* %addr)
ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: ldnt1w_f32:
; CHECK: ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %pred,
<vscale x 4 x float>* %addr)
float* %addr)
ret <vscale x 4 x float> %res
}

;
; LDNT1D
;

define <vscale x 2 x i64> @ldnt1d_i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
define <vscale x 2 x i64> @ldnt1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: ldnt1d_i64:
; CHECK: ldnt1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %pred,
<vscale x 2 x i64>* %addr)
i64* %addr)
ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: ldnt1d_f64:
; CHECK: ldnt1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %pred,
<vscale x 2 x double>* %addr)
double* %addr)
ret <vscale x 2 x double> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>*)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>*)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>*)
declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>*)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>*)
declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, i64*)
declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, float*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, double*)
42 changes: 21 additions & 21 deletions llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
@@ -4,92 +4,92 @@
; STNT1B
;

define void @stnt1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
define void @stnt1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: stnt1b_i8:
; CHECK: stnt1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data,
<vscale x 16 x i1> %pred,
<vscale x 16 x i8>* %addr)
i8* %addr)
ret void
}

;
; STNT1H
;

define void @stnt1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
define void @stnt1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: stnt1h_i16:
; CHECK: stnt1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data,
<vscale x 8 x i1> %pred,
<vscale x 8 x i16>* %addr)
i16* %addr)
ret void
}

define void @stnt1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
define void @stnt1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: stnt1h_f16:
; CHECK: stnt1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data,
<vscale x 8 x i1> %pred,
<vscale x 8 x half>* %addr)
half* %addr)
ret void
}

;
; STNT1W
;

define void @stnt1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
define void @stnt1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: stnt1w_i32:
; CHECK: stnt1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data,
<vscale x 4 x i1> %pred,
<vscale x 4 x i32>* %addr)
i32* %addr)
ret void
}

define void @stnt1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
define void @stnt1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: stnt1w_f32:
; CHECK: stnt1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data,
<vscale x 4 x i1> %pred,
<vscale x 4 x float>* %addr)
float* %addr)
ret void
}

;
; STNT1D
;

define void @stnt1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
define void @stnt1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: stnt1d_i64:
; CHECK: stnt1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data,
<vscale x 2 x i1> %pred,
<vscale x 2 x i64>* %addr)
i64* %addr)
ret void
}

define void @stnt1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
define void @stnt1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: stnt1d_f64:
; CHECK: stnt1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data,
<vscale x 2 x i1> %pred,
<vscale x 2 x double>* %addr)
double* %addr)
ret void
}

declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
