AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics
Summary:
They correspond to BUFFER_LOAD/STORE_FORMAT_XYZW and will be used by Mesa
to implement the GL_ARB_shader_image_load_store extension.

The intention is that for llvm.amdgcn.buffer.load.format, LLVM will decide
whether one of the _X/_XY/_XYZ opcodes can be used (similar to image sampling
and loads). However, this is not currently implemented.
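
For reference, a call to the load intrinsic as exercised by the new test below
looks roughly like this (a minimal sketch; the concrete values are illustrative):

    %data = call <4 x float> @llvm.amdgcn.buffer.load.format(
                <4 x i32> %rsrc,   ; resource descriptor (SGPR)
                i32 %soffset,      ; scalar offset (SGPR)
                i32 0,             ; immediate offset
                i32 %vindex,       ; index (VGPR)
                i32 %voffset,      ; offset (VGPR)
                i1 0, i1 0)        ; glc, slc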

For llvm.amdgcn.buffer.store.format, LLVM cannot decide to use one of the
"smaller" opcodes, so the intrinsic is overloaded on the data type. Currently,
only the v4f32 variant is actually implemented, since GLSL likewise only has a
vec4 variant of the store instructions, although it's conceivable that Mesa
will want to be smarter about this in the future.
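
Because the store intrinsic is overloaded, calls use a mangled name with the
data type as a suffix; only the v4f32 form is selectable for now (again a
minimal sketch with illustrative values):

    call void @llvm.amdgcn.buffer.store.format.v4f32(
            <4 x float> %vdata,   ; data to store (VGPR)
            <4 x i32> %rsrc,      ; resource descriptor (SGPR)
            i32 %soffset,         ; scalar offset (SGPR)
            i32 0,                ; immediate offset
            i32 %vindex,          ; index (VGPR)
            i32 %voffset,         ; offset (VGPR)
            i1 0, i1 0)           ; glc, slc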

BUFFER_LOAD_FORMAT_XYZW is already exposed via llvm.SI.vs.load.input, which
has a legacy name, pretends not to access memory, and does not capture the
full flexibility of the instruction.

Reviewers: arsenm, tstellarAMD, mareko

Subscribers: arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D17277

llvm-svn: 263140
nhaehnle committed Mar 10, 2016
1 parent 8be8de6 commit b142770
Showing 5 changed files with 257 additions and 15 deletions.
22 changes: 22 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -205,6 +205,28 @@ def int_amdgcn_image_atomic_cmpswap : Intrinsic <
llvm_i1_ty], // slc(imm)
[]>;

def int_amdgcn_buffer_load_format : Intrinsic <
[llvm_v4f32_ty],
[llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // soffset(SGPR)
llvm_i32_ty, // offset(imm)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
[IntrReadMem]>;

def int_amdgcn_buffer_store_format : Intrinsic <
[],
[llvm_anyfloat_ty, // vdata(VGPR) -- can currently only select v4f32
llvm_v4i32_ty, // rsrc(SGPR)
llvm_i32_ty, // soffset(SGPR)
llvm_i32_ty, // offset(imm)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
[]>;

def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
"__builtin_amdgcn_read_workdim">;
97 changes: 85 additions & 12 deletions llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -949,18 +949,23 @@ defm BUFFER_LOAD_FORMAT_XYZ : MUBUF_Load_Helper <
defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <
mubuf<0x03>, "buffer_load_format_xyzw", VReg_128
>;
defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
mubuf<0x04>, "buffer_store_format_x", VGPR_32
>;
defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
mubuf<0x05>, "buffer_store_format_xy", VReg_64
>;
defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
mubuf<0x06>, "buffer_store_format_xyz", VReg_96
>;
defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
>;
// Without mayLoad and hasSideEffects, TableGen complains about the pattern
// matching llvm.amdgcn.buffer.store.format. Eventually, we'll need a way
// to express the effects of the intrinsic more precisely.
let mayLoad = 1, hasSideEffects = 1 in {
defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
mubuf<0x04>, "buffer_store_format_x", VGPR_32
>;
defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
mubuf<0x05>, "buffer_store_format_xy", VReg_64
>;
defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
mubuf<0x06>, "buffer_store_format_xyz", VReg_96
>;
defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
>;
}
defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
>;
@@ -2093,6 +2098,74 @@ def : Pat <
$src0, $src1, $src2, $src3)
>;

//===----------------------------------------------------------------------===//
// buffer_load/store_format patterns
//===----------------------------------------------------------------------===//
def : Pat<
(int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, 0,
imm:$glc, imm:$slc),
(BUFFER_LOAD_FORMAT_XYZW_OFFSET $rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, 0,
imm:$glc, imm:$slc),
(BUFFER_LOAD_FORMAT_XYZW_IDXEN $vindex, $rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, i32:$voffset,
imm:$glc, imm:$slc),
(BUFFER_LOAD_FORMAT_XYZW_OFFEN $voffset, $rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, i32:$voffset,
imm:$glc, imm:$slc),
(BUFFER_LOAD_FORMAT_XYZW_BOTHEN
(REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
$rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
i32:$soffset, imm:$offset, 0, 0,
imm:$glc, imm:$slc),
(BUFFER_STORE_FORMAT_XYZW_OFFSET $vdata, $rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
i32:$soffset, imm:$offset, i32:$vindex, 0,
imm:$glc, imm:$slc),
(BUFFER_STORE_FORMAT_XYZW_IDXEN $vdata, $vindex, $rsrc, $soffset,
(as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
i32:$soffset, imm:$offset, 0, i32:$voffset,
imm:$glc, imm:$slc),
(BUFFER_STORE_FORMAT_XYZW_OFFEN $vdata, $voffset, $rsrc, $soffset,
(as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
>;

def : Pat<
(int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc, i32:$soffset,
imm:$offset, i32:$vindex, i32:$voffset,
imm:$glc, imm:$slc),
(BUFFER_STORE_FORMAT_XYZW_BOTHEN
$vdata,
(REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
$rsrc, $soffset, (as_i16imm $offset),
(as_i1imm $glc), (as_i1imm $slc), 0)
>;

//===----------------------------------------------------------------------===//
// S_GETREG_B32 Intrinsic Pattern.
//===----------------------------------------------------------------------===//
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -236,7 +236,7 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
LLVMContext &Ctx = MF->getFunction()->getContext();
DebugLoc DL = MI->getDebugLoc();
bool IsLoad = TII->get(LoadStoreOp).mayLoad();
bool IsStore = TII->get(LoadStoreOp).mayStore();

bool RanOutOfSGPRs = false;
bool Scavenged = false;
@@ -272,14 +272,14 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
SOffsetRegState |= RegState::Kill;

BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
.addReg(SubReg, getDefRegState(IsLoad))
.addReg(SubReg, getDefRegState(!IsStore))
.addReg(ScratchRsrcReg)
.addReg(SOffset, SOffsetRegState)
.addImm(Offset)
.addImm(0) // glc
.addImm(0) // slc
.addImm(0) // tfe
.addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
.addReg(Value, RegState::Implicit | getDefRegState(!IsStore))
.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
}
}
69 changes: 69 additions & 0 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
@@ -0,0 +1,69 @@
;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s

;CHECK-LABEL: {{^}}buffer_load:
;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4
;CHECK: buffer_load_format_xyzw v[4:7], s[0:3], s4 glc
;CHECK: buffer_load_format_xyzw v[8:11], s[0:3], s4 slc
;CHECK: s_waitcnt
define {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32> inreg, i32 inreg) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
%data_glc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
%data_slc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
%r0 = insertvalue {<4 x float>, <4 x float>, <4 x float>} undef, <4 x float> %data, 0
%r1 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r0, <4 x float> %data_glc, 1
%r2 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r1, <4 x float> %data_slc, 2
ret {<4 x float>, <4 x float>, <4 x float>} %r2
}

;CHECK-LABEL: {{^}}buffer_load_immoffs:
;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4 offset:42
;CHECK: s_waitcnt
define <4 x float> @buffer_load_immoffs(<4 x i32> inreg, i32 inreg) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
ret <4 x float> %data
}

;CHECK-LABEL: {{^}}buffer_load_idx:
;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
;CHECK: s_waitcnt
define <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 0, i1 0, i1 0)
ret <4 x float> %data
}

;CHECK-LABEL: {{^}}buffer_load_ofs:
;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 offen
;CHECK: s_waitcnt
define <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 0, i32 %1, i1 0, i1 0)
ret <4 x float> %data
}

;CHECK-LABEL: {{^}}buffer_load_both:
;CHECK: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
;CHECK: s_waitcnt
define <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 %2, i1 0, i1 0)
ret <4 x float> %data
}

;CHECK-LABEL: {{^}}buffer_load_both_reversed:
;CHECK: v_mov_b32_e32 v2, v0
;CHECK: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
;CHECK: s_waitcnt
define <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i32) #0 {
main_body:
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %2, i32 %1, i1 0, i1 0)
ret <4 x float> %data
}

declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #1

attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readonly }
78 changes: 78 additions & 0 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
@@ -0,0 +1,78 @@
;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s

;CHECK-LABEL: {{^}}buffer_store:
;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4
;CHECK: buffer_store_format_xyzw v[4:7], s[0:3], s4 glc
;CHECK: buffer_store_format_xyzw v[8:11], s[0:3], s4 slc
define void @buffer_store(<4 x i32> inreg, i32 inreg, <4 x float>, <4 x float>, <4 x float>) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %3, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %4, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
ret void
}

;CHECK-LABEL: {{^}}buffer_store_immoffs:
;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4 offset:42
define void @buffer_store_immoffs(<4 x i32> inreg, i32 inreg, <4 x float>) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
ret void
}

;CHECK-LABEL: {{^}}buffer_store_idx:
;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
define void @buffer_store_idx(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
ret void
}

;CHECK-LABEL: {{^}}buffer_store_ofs:
;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 offen
define void @buffer_store_ofs(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 %3, i1 0, i1 0)
ret void
}

;CHECK-LABEL: {{^}}buffer_store_both:
;CHECK: buffer_store_format_xyzw v[0:3], v[4:5], s[0:3], 0 idxen offen
define void @buffer_store_both(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 %4, i1 0, i1 0)
ret void
}

;CHECK-LABEL: {{^}}buffer_store_both_reversed:
;CHECK: v_mov_b32_e32 v6, v4
;CHECK: buffer_store_format_xyzw v[0:3], v[5:6], s[0:3], 0 idxen offen
define void @buffer_store_both_reversed(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %4, i32 %3, i1 0, i1 0)
ret void
}

; Ideally, the register allocator would avoid the wait here
;
;CHECK-LABEL: {{^}}buffer_store_wait:
;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
;CHECK: s_waitcnt vmcnt(0) expcnt(0)
;CHECK: buffer_load_format_xyzw v[0:3], v5, s[0:3], 0 idxen
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_store_format_xyzw v[0:3], v6, s[0:3], 0 idxen
define void @buffer_store_wait(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32, i32) #0 {
main_body:
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
%data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %4, i32 0, i1 0, i1 0)
call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %data, <4 x i32> %0, i32 0, i32 0, i32 %5, i32 0, i1 0, i1 0)
ret void
}

declare void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32, i1, i1) #1
declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #2

attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind }
attributes #2 = { nounwind readonly }
