@@ -1,16 +1,72 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefixes=R600 %s
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
; FUNC-LABEL: {{^}}rotl_i32:
; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
; R600-NEXT: 32
; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
; SI: s_sub_i32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
; SI: v_mov_b32_e32 [[VDST:v[0-9]+]], [[SDST]]
; SI: v_alignbit_b32 {{v[0-9]+, [s][0-9]+, s[0-9]+}}, [[VDST]]
define amdgpu_kernel void @rotl_i32(ptr addrspace(1) %in, i32 %x, i32 %y) {
; R600-LABEL: rotl_i32:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: SUB_INT * T0.W, literal.x, KC0[2].W,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.X, KC0[2].Z, KC0[2].Z, PV.W,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
;
; SI-LABEL: rotl_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_sub_i32 s3, 32, s3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_mov_b32_e32 v0, s3
; SI-NEXT: v_alignbit_b32 v0, s2, s2, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; GFX8-LABEL: rotl_i32:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_sub_i32 s3, 32, s3
; GFX8-NEXT: v_mov_b32_e32 v0, s3
; GFX8-NEXT: v_alignbit_b32 v2, s2, s2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: rotl_i32:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_i32 s3, 32, s3
; GFX10-NEXT: v_alignbit_b32 v1, s2, s2, s3
; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: rotl_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_i32 s3, 32, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_alignbit_b32 v1, s2, s2, s3
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
  %0 = shl i32 %x, %y
  %1 = sub i32 32, %y
@@ -20,13 +76,84 @@ entry:
  ret void
}
; FUNC-LABEL: {{^}}rotl_v2i32:
; SI-DAG: s_sub_i32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
define amdgpu_kernel void @rotl_v2i32(ptr addrspace(1) %in, <2 x i32> %x, <2 x i32> %y) {
; R600-LABEL: rotl_v2i32:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: SUB_INT * T0.W, literal.x, KC0[3].Z,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.Y, KC0[3].X, KC0[3].X, PV.W,
; R600-NEXT: SUB_INT * T0.W, literal.x, KC0[3].Y,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.X, KC0[2].W, KC0[2].W, PV.W,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
;
; SI-LABEL: rotl_v2i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_sub_i32 s7, 32, s7
; SI-NEXT: s_sub_i32 s6, 32, s6
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: v_alignbit_b32 v1, s5, s5, v0
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_alignbit_b32 v0, s4, s4, v0
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX8-LABEL: rotl_v2i32:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_sub_i32 s2, 32, s6
; GFX8-NEXT: s_sub_i32 s3, 32, s7
; GFX8-NEXT: v_mov_b32_e32 v0, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_alignbit_b32 v1, s5, s5, v0
; GFX8-NEXT: v_alignbit_b32 v0, s4, s4, v2
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: rotl_v2i32:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_i32 s0, 32, s7
; GFX10-NEXT: s_sub_i32 s1, 32, s6
; GFX10-NEXT: v_alignbit_b32 v1, s5, s5, s0
; GFX10-NEXT: v_alignbit_b32 v0, s4, s4, s1
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: rotl_v2i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_i32 s2, 32, s7
; GFX11-NEXT: s_sub_i32 s3, 32, s6
; GFX11-NEXT: v_alignbit_b32 v1, s5, s5, s2
; GFX11-NEXT: v_alignbit_b32 v0, s4, s4, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
  %0 = shl <2 x i32> %x, %y
  %1 = sub <2 x i32> <i32 32, i32 32>, %y
@@ -36,17 +163,110 @@ entry:
  ret void
}
; FUNC-LABEL: {{^}}rotl_v4i32:
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
define amdgpu_kernel void @rotl_v4i32(ptr addrspace(1) %in, <4 x i32> %x, <4 x i32> %y) {
; R600-LABEL: rotl_v4i32:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 13, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: SUB_INT * T0.W, literal.x, KC0[5].X,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.W, KC0[4].X, KC0[4].X, PV.W,
; R600-NEXT: SUB_INT * T1.W, literal.x, KC0[4].W,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.Z, KC0[3].W, KC0[3].W, PS,
; R600-NEXT: SUB_INT * T1.W, literal.x, KC0[4].Z,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.Y, KC0[3].Z, KC0[3].Z, PV.W,
; R600-NEXT: SUB_INT * T1.W, literal.x, KC0[4].Y,
; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; R600-NEXT: BIT_ALIGN_INT T0.X, KC0[3].Y, KC0[3].Y, PV.W,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
;
; SI-LABEL: rotl_v4i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0xd
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_sub_i32 s8, 32, s8
; SI-NEXT: s_sub_i32 s9, 32, s9
; SI-NEXT: s_sub_i32 s11, 32, s11
; SI-NEXT: s_sub_i32 s10, 32, s10
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_alignbit_b32 v3, s7, s7, v0
; SI-NEXT: v_mov_b32_e32 v0, s10
; SI-NEXT: v_alignbit_b32 v2, s6, s6, v0
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_alignbit_b32 v1, s5, s5, v0
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_alignbit_b32 v0, s4, s4, v0
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX8-LABEL: rotl_v4i32:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_sub_i32 s3, 32, s9
; GFX8-NEXT: s_sub_i32 s9, 32, s11
; GFX8-NEXT: s_sub_i32 s2, 32, s8
; GFX8-NEXT: s_sub_i32 s8, 32, s10
; GFX8-NEXT: v_mov_b32_e32 v0, s9
; GFX8-NEXT: v_alignbit_b32 v3, s7, s7, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s8
; GFX8-NEXT: v_alignbit_b32 v2, s6, s6, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s3
; GFX8-NEXT: v_alignbit_b32 v1, s5, s5, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v5, s1
; GFX8-NEXT: v_alignbit_b32 v0, s4, s4, v0
; GFX8-NEXT: v_mov_b32_e32 v4, s0
; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: rotl_v4i32:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v4, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_i32 s0, 32, s8
; GFX10-NEXT: s_sub_i32 s1, 32, s9
; GFX10-NEXT: s_sub_i32 s8, 32, s11
; GFX10-NEXT: s_sub_i32 s9, 32, s10
; GFX10-NEXT: v_alignbit_b32 v3, s7, s7, s8
; GFX10-NEXT: v_alignbit_b32 v2, s6, s6, s9
; GFX10-NEXT: v_alignbit_b32 v1, s5, s5, s1
; GFX10-NEXT: v_alignbit_b32 v0, s4, s4, s0
; GFX10-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: rotl_v4i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b256 s[4:11], s[0:1], 0x34
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v4, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_i32 s2, 32, s8
; GFX11-NEXT: s_sub_i32 s3, 32, s9
; GFX11-NEXT: s_sub_i32 s8, 32, s11
; GFX11-NEXT: s_sub_i32 s9, 32, s10
; GFX11-NEXT: v_alignbit_b32 v3, s7, s7, s8
; GFX11-NEXT: v_alignbit_b32 v2, s6, s6, s9
; GFX11-NEXT: v_alignbit_b32 v1, s5, s5, s3
; GFX11-NEXT: v_alignbit_b32 v0, s4, s4, s2
; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
  %0 = shl <4 x i32> %x, %y
  %1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
@@ -56,20 +276,120 @@ entry:
  ret void
}
; GCN-LABEL: @test_rotl_i16
; GCN: global_load_ushort [[X:v[0-9]+]]
; GCN: global_load_ushort [[D:v[0-9]+]]
; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
; GCN: v_lshlrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
; GCN: v_lshrrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
declare i16 @llvm.fshl.i16(i16, i16, i16)
define void @test_rotl_i16(ptr addrspace(1) nocapture readonly %sourceA, ptr addrspace(1) nocapture readonly %sourceB, ptr addrspace(1) nocapture %destValues) {
; R600-LABEL: test_rotl_i16:
; R600: ; %bb.0: ; %entry
; R600-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[]
; R600-NEXT: TEX 0 @8
; R600-NEXT: ALU 0, @13, KC0[CB0:0-32], KC1[]
; R600-NEXT: TEX 0 @10
; R600-NEXT: ALU 21, @14, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: Fetch clause starting at 8:
; R600-NEXT: VTX_READ_16 T0.X, T0.X, 48, #1
; R600-NEXT: Fetch clause starting at 10:
; R600-NEXT: VTX_READ_16 T1.X, T1.X, 32, #1
; R600-NEXT: ALU clause starting at 12:
; R600-NEXT: MOV * T0.X, KC0[2].Z,
; R600-NEXT: ALU clause starting at 13:
; R600-NEXT: MOV * T1.X, KC0[2].Y,
; R600-NEXT: ALU clause starting at 14:
; R600-NEXT: SUB_INT T0.W, 0.0, T0.X,
; R600-NEXT: AND_INT * T1.W, T0.X, literal.x,
; R600-NEXT: 15(2.101948e-44), 0(0.000000e+00)
; R600-NEXT: AND_INT * T0.W, PV.W, literal.x,
; R600-NEXT: 15(2.101948e-44), 0(0.000000e+00)
; R600-NEXT: LSHR T0.Z, T1.X, PV.W,
; R600-NEXT: LSHL T0.W, T1.X, T1.W,
; R600-NEXT: ADD_INT * T1.W, KC0[2].W, literal.x,
; R600-NEXT: 8(1.121039e-44), 0(0.000000e+00)
; R600-NEXT: AND_INT T2.W, PS, literal.x,
; R600-NEXT: OR_INT * T0.W, PV.W, PV.Z,
; R600-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; R600-NEXT: AND_INT T0.W, PS, literal.x,
; R600-NEXT: LSHL * T2.W, PV.W, literal.y,
; R600-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
; R600-NEXT: LSHL T0.X, PV.W, PS,
; R600-NEXT: LSHL * T0.W, literal.x, PS,
; R600-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
; R600-NEXT: MOV T0.Y, 0.0,
; R600-NEXT: MOV * T0.Z, 0.0,
; R600-NEXT: LSHR * T1.X, T1.W, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
;
; SI-LABEL: test_rotl_i16:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[4:7], 0 addr64 offset:48
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v1, 15, v2
; SI-NEXT: v_sub_i32_e32 v2, vcc, 0, v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, v1, v0
; SI-NEXT: v_and_b32_e32 v2, 15, v2
; SI-NEXT: v_lshrrev_b32_e32 v0, v2, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_short v0, v[4:5], s[4:7], 0 addr64 offset:8
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: test_rotl_i16:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 48, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GFX8-NEXT: flat_load_ushort v2, v[2:3]
; GFX8-NEXT: flat_load_ushort v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v2, v0
; GFX8-NEXT: v_sub_u16_e32 v2, 0, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v0, v2, v0
; GFX8-NEXT: v_or_b32_e32 v2, v1, v0
; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v4
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
; GFX8-NEXT: flat_store_short v[0:1], v2
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: test_rotl_i16:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: global_load_ushort v6, v[2:3], off offset:48
; GFX10-NEXT: global_load_ushort v7, v[0:1], off offset:32
; GFX10-NEXT: s_waitcnt vmcnt(1)
; GFX10-NEXT: v_sub_nc_u16 v0, 0, v6
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_lshlrev_b16 v1, v6, v7
; GFX10-NEXT: v_lshrrev_b16 v0, v0, v7
; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
; GFX10-NEXT: global_store_short v[4:5], v0, off offset:8
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: test_rotl_i16:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: global_load_u16 v2, v[2:3], off offset:48
; GFX11-NEXT: global_load_u16 v0, v[0:1], off offset:32
; GFX11-NEXT: s_waitcnt vmcnt(1)
; GFX11-NEXT: v_sub_nc_u16 v1, 0, v2
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_lshlrev_b16 v2, v2, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshrrev_b16 v0, v1, v0
; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
; GFX11-NEXT: global_store_b16 v[4:5], v0, off offset:8
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
  %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %sourceA, i64 16
  %a = load i16, ptr addrspace(1) %arrayidx