208 changes: 208 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
@@ -0,0 +1,208 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -march=amdgcn -mcpu=fiji -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs -o - %s | FileCheck %s

; TODO: Could potentially insert a G_ASSERT_ALIGN for the aligned argument here
define void @arg_align_8(i8 addrspace(1)* align 8 %arg0) {
; CHECK-LABEL: name: arg_align_8
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; CHECK-NEXT: G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.arg0, align 8, addrspace 1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK-NEXT: S_SETPC_B64_return [[COPY3]]
store i8 0, i8 addrspace(1)* %arg0, align 8
ret void
}

declare i8 addrspace(1)* @returns_ptr()
declare align 8 i8 addrspace(1)* @returns_ptr_align8()

define void @call_result_align_1() {
; CHECK-LABEL: name: call_result_align_1
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
; CHECK-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
; CHECK-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[COPY11]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY12]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY13]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr14 = COPY [[COPY15]](s32)
; CHECK-NEXT: $vgpr31 = COPY [[COPY16]](s32)
; CHECK-NEXT: $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
; CHECK-NEXT: G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.ptr, addrspace 1)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK-NEXT: S_SETPC_B64_return [[COPY20]]
%ptr = call align 1 i8 addrspace(1)* @returns_ptr()
store i8 0, i8 addrspace(1)* %ptr, align 1
ret void
}

define void @call_result_align_8() {
; CHECK-LABEL: name: call_result_align_8
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
; CHECK-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
; CHECK-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[COPY11]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY12]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY13]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr14 = COPY [[COPY15]](s32)
; CHECK-NEXT: $vgpr31 = COPY [[COPY16]](s32)
; CHECK-NEXT: $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
; CHECK-NEXT: [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
; CHECK-NEXT: G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK-NEXT: S_SETPC_B64_return [[COPY20]]
%ptr = call align 8 i8 addrspace(1)* @returns_ptr()
store i8 0, i8 addrspace(1)* %ptr, align 8
ret void
}

define void @declaration_result_align_8() {
; CHECK-LABEL: name: declaration_result_align_8
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
; CHECK-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
; CHECK-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr_align8
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[COPY11]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY12]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY13]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr14 = COPY [[COPY15]](s32)
; CHECK-NEXT: $vgpr31 = COPY [[COPY16]](s32)
; CHECK-NEXT: $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr_align8, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
; CHECK-NEXT: [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
; CHECK-NEXT: G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
; CHECK-NEXT: S_SETPC_B64_return [[COPY20]]
%ptr = call i8 addrspace(1)* @returns_ptr_align8()
store i8 0, i8 addrspace(1)* %ptr, align 8
ret void
}

define i8 addrspace(1)* @tail_call_assert_align() {
; CHECK-LABEL: name: tail_call_assert_align
; CHECK: bb.1.entry:
; CHECK-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
; CHECK-NEXT: [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
; CHECK-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK-NEXT: [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @returns_ptr_align8
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[COPY11]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY12]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY13]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr14 = COPY [[COPY15]](s32)
; CHECK-NEXT: $vgpr31 = COPY [[COPY16]](s32)
; CHECK-NEXT: SI_TCRETURN [[GV]](p0), @returns_ptr_align8, 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
entry:
%call = tail call i8 addrspace(1)* @returns_ptr_align8()
ret i8 addrspace(1)* %call
}
62 changes: 62 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-assert-align.mir
@@ -0,0 +1,62 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s

---
name: assert_align_vgpr
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0_vgpr1
    ; CHECK-LABEL: name: assert_align_vgpr
    ; CHECK: liveins: $vgpr0_vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:vgpr(p1) = COPY $vgpr0_vgpr1
    ; CHECK-NEXT: %assert_align:vgpr(p1) = G_ASSERT_ALIGN %copy, 4
    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
    %copy:_(p1) = COPY $vgpr0_vgpr1
    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
    S_ENDPGM 0, implicit %assert_align
...

---
name: assert_align_sgpr
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $sgpr8_sgpr9
    ; CHECK-LABEL: name: assert_align_sgpr
    ; CHECK: liveins: $sgpr8_sgpr9
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:sgpr(p1) = COPY $sgpr8_sgpr9
    ; CHECK-NEXT: %assert_align:sgpr(p1) = G_ASSERT_ALIGN %copy, 4
    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
    %copy:_(p1) = COPY $sgpr8_sgpr9
    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
    S_ENDPGM 0, implicit %assert_align
...

---
name: assert_align_agpr
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $agpr0_agpr1
    ; CHECK-LABEL: name: assert_align_agpr
    ; CHECK: liveins: $agpr0_agpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:agpr(p1) = COPY $agpr0_agpr1
    ; CHECK-NEXT: %assert_align:agpr(p1) = G_ASSERT_ALIGN %copy, 4
    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
    %copy:_(p1) = COPY $agpr0_agpr1
    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
    S_ENDPGM 0, implicit %assert_align
...
55 changes: 55 additions & 0 deletions llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -1917,3 +1917,58 @@ TEST_F(AMDGPUGISelMITest, TestNumSignBitsSBFX) {
  EXPECT_EQ(1u, Info.computeNumSignBits(CopyUnkValBfxReg));
  EXPECT_EQ(1u, Info.computeNumSignBits(CopyUnkOffBfxReg));
}

TEST_F(AMDGPUGISelMITest, TestKnownBitsAssertAlign) {
  StringRef MIRString = R"MIR(
   %val:_(s64) = COPY $vgpr0_vgpr1
   %ptrval:_(p1) = COPY $vgpr0_vgpr1
   %assert_align0:_(s64) = G_ASSERT_ALIGN %val, 0
   %copy_assert_align0:_(s64) = COPY %assert_align0
   %assert_align1:_(s64) = G_ASSERT_ALIGN %val, 1
   %copy_assert_align1:_(s64) = COPY %assert_align1
   %assert_align2:_(s64) = G_ASSERT_ALIGN %val, 2
   %copy_assert_align2:_(s64) = COPY %assert_align2
   %assert_align3:_(s64) = G_ASSERT_ALIGN %val, 3
   %copy_assert_align3:_(s64) = COPY %assert_align3
   %assert_align8:_(s64) = G_ASSERT_ALIGN %val, 8
   %copy_assert_align8:_(s64) = COPY %assert_align8
   %assert_maxalign:_(s64) = G_ASSERT_ALIGN %val, 30
   %copy_assert_maxalign:_(s64) = COPY %assert_maxalign
   %assert_ptr_align5:_(p1) = G_ASSERT_ALIGN %ptrval, 5
   %copy_assert_ptr_align5:_(p1) = COPY %assert_ptr_align5
)MIR";
  setUp(MIRString);
  if (!TM)
    return;
  GISelKnownBits Info(*MF);

  KnownBits Res;
  auto GetKB = [&](unsigned Idx) {
    Register CopyReg = Copies[Idx];
    auto *Copy = MRI->getVRegDef(CopyReg);
    return Info.getKnownBits(Copy->getOperand(1).getReg());
  };

  auto CheckBits = [&](unsigned NumBits, unsigned Idx) {
    Res = GetKB(Idx);
    EXPECT_EQ(64u, Res.getBitWidth());
    EXPECT_EQ(NumBits, Res.Zero.countTrailingOnes());
    EXPECT_EQ(64u, Res.One.countTrailingZeros());
    EXPECT_EQ(Align(1ull << NumBits), Info.computeKnownAlignment(Copies[Idx]));
  };

  CheckBits(0, Copies.size() - 7);
  CheckBits(1, Copies.size() - 6);
  CheckBits(2, Copies.size() - 5);
  CheckBits(3, Copies.size() - 4);
  CheckBits(8, Copies.size() - 3);
  CheckBits(30, Copies.size() - 2);
  CheckBits(5, Copies.size() - 1);
}
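
For context only (not part of the patch): the CheckBits helper above relies on the relation between the G_ASSERT_ALIGN immediate and the resulting known bits, namely that an immediate of N (interpreted here as the log2 of the asserted alignment) makes the low N bits known zero and gives a known alignment of 1 << N. Below is a minimal standalone C++ sketch of that relation; the names LogAlign and AlignedVal are illustrative and do not come from the patch.

#include <cassert>
#include <cstdint>

int main() {
  // G_ASSERT_ALIGN %val, 3 asserts a 1 << 3 = 8 byte alignment.
  const unsigned LogAlign = 3;
  const uint64_t Alignment = uint64_t(1) << LogAlign;
  const uint64_t LowBitsMask = Alignment - 1; // bits that must be known zero

  // Any value honoring the assertion is a multiple of the alignment,
  // i.e. its low LogAlign bits are clear.
  const uint64_t AlignedVal = 0xdeadbee8;  // low 3 bits are zero
  assert((AlignedVal & LowBitsMask) == 0); // mirrors Zero.countTrailingOnes() >= 3
  assert(AlignedVal % Alignment == 0);     // mirrors computeKnownAlignment() >= Align(8)
  return 0;
}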