diff --git a/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll b/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll
new file mode 100644
index 00000000000000..30339476bce8ce
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck %s
+
+; Check for optimizing the passed implicit workitem ID based on the
+; required group size. This should avoid a few bit packing operations.
+
+declare hidden void @callee() #0
+
+define amdgpu_kernel void @known_x_0(i32 addrspace(1)* %out) !reqd_work_group_size !0 {
+; CHECK-LABEL: known_x_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_y_0(i32 addrspace(1)* %out) !reqd_work_group_size !1 {
+; CHECK-LABEL: known_y_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_z_0(i32 addrspace(1)* %out) !reqd_work_group_size !2 {
+; CHECK-LABEL: known_z_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_yz_0(i32 addrspace(1)* %out) !reqd_work_group_size !3 {
+; CHECK-LABEL: known_yz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_xz_0(i32 addrspace(1)* %out) !reqd_work_group_size !4 {
+; CHECK-LABEL: known_xz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_xyz_0(i32 addrspace(1)* %out) !reqd_work_group_size !5 {
+; CHECK-LABEL: known_xyz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+attributes #0 = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" }
+
+!0 = !{i32 1, i32 64, i32 64}
+!1 = !{i32 64, i32 1, i32 64}
+!2 = !{i32 64, i32 64, i32 1}
+!3 = !{i32 64, i32 1, i32 1}
+!4 = !{i32 1, i32 64, i32 1}
+!5 = !{i32 1, i32 1, i32 1}
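
Note on the tests above: each kernel materializes the packed workitem ID for the callee as v31 = x | (y << 10) | (z << 20), which is the v_lshlrev_b32/v_or3_b32 sequence in every CHECK block. When !reqd_work_group_size pins a dimension to 1, the corresponding ID is known zero and its shift/or is dead; the identical CHECK blocks show the packing is not yet elided, which is what this pre-commit test captures. The following is a minimal C++ sketch, not part of this patch, of how the metadata query could look: the helper name isWorkitemIDKnownZero and its placement are hypothetical, while Function::getMetadata, MDNode, and mdconst::dyn_extract are stock LLVM APIs.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Hypothetical helper: Dim is 0 = x, 1 = y, 2 = z. A required group size
// of 1 in a dimension means every workitem has ID 0 there, so the pack of
// that ID into v31 (x | y << 10 | z << 20) could be dropped.
static bool isWorkitemIDKnownZero(const Function &F, unsigned Dim) {
  MDNode *Node = F.getMetadata("reqd_work_group_size");
  if (!Node || Node->getNumOperands() != 3)
    return false;
  auto *Size = mdconst::dyn_extract<ConstantInt>(Node->getOperand(Dim));
  return Size && Size->isOne();
}

With a check along these lines, the call lowering could skip the v_lshlrev_b32 and shrink the v_or3_b32 for any dimension where it returns true, e.g. passing v0 directly as v31 in the known_yz_0 case.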