Conversation

arsenm (Contributor) commented Nov 12, 2025

No description provided.

arsenm (Contributor, Author) commented Nov 12, 2025

This stack of pull requests is managed by Graphite. Learn more about stacking.

arsenm marked this pull request as ready for review November 12, 2025 01:00
llvmbot (Member) commented Nov 12, 2025

@llvm/pr-subscribers-llvm-transforms

Author: Matt Arsenault (arsenm)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/167611.diff

1 file affected:

  • (modified) llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll (+200-8)
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
index e0c80c0389541..32dca860a7ded 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces -o - %s | FileCheck %s
 
-define float @f0(ptr %p) {
-; CHECK-LABEL: define float @f0(
+define float @assume_is_shared_gep(ptr %p) {
+; CHECK-LABEL: define float @assume_is_shared_gep(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[P]])
@@ -24,8 +24,8 @@ entry:
   ret float %load
 }
 
-define float @f1(ptr %p) {
-; CHECK-LABEL: define float @f1(
+define float @assume_is_private_gep(ptr %p) {
+; CHECK-LABEL: define float @assume_is_private_gep(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[P]])
@@ -47,8 +47,8 @@ entry:
   ret float %load
 }
 
-define float @f2(ptr %p) {
-; CHECK-LABEL: define float @f2(
+define float @assume_not_private_and_not_shared_gep(ptr %p) {
+; CHECK-LABEL: define float @assume_not_private_and_not_shared_gep(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[P]])
@@ -78,8 +78,8 @@ entry:
   ret float %load
 }
 
-define float @g0(i32 %c, ptr %p) {
-; CHECK-LABEL: define float @g0(
+define float @conditionally_assume_is_shared_gep(i32 %c, ptr %p) {
+; CHECK-LABEL: define float @conditionally_assume_is_shared_gep(
 ; CHECK-SAME: i32 [[C:%.*]], ptr [[P:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*]]:
 ; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
@@ -127,6 +127,198 @@ if.end:
   ret float %add2
 }
 
+define float @conditionally_assume_is_shared_else_assume_private(i32 %c, ptr %p) {
+; CHECK-LABEL: define float @conditionally_assume_is_shared_else_assume_private(
+; CHECK-SAME: i32 [[C:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label %[[IF_THEN_SHARED:.*]], label %[[IF_THEN_PRIVATE:.*]]
+; CHECK:       [[IF_THEN_SHARED]]:
+; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[P]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_SHARED]])
+; CHECK-NEXT:    [[WORKITEM_ID_X_0:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[WORKITEM_ID_X_0]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(3)
+; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[TMP0]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[LOAD0:%.*]] = load float, ptr addrspace(3) [[ARRAYIDX0]], align 4
+; CHECK-NEXT:    [[ADD0:%.*]] = fadd float [[LOAD0]], 4.000000e+00
+; CHECK-NEXT:    br label %[[IF_END:.*]]
+; CHECK:       [[IF_THEN_PRIVATE]]:
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[P]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_PRIVATE]])
+; CHECK-NEXT:    [[WORKITEM_ID_X_1:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[IDXPROM1:%.*]] = zext i32 [[WORKITEM_ID_X_1]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(5)
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr addrspace(5) [[TMP1]], i64 [[IDXPROM1]]
+; CHECK-NEXT:    [[LOAD1:%.*]] = load float, ptr addrspace(5) [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd float [[LOAD1]], 4.000000e+00
+; CHECK-NEXT:    br label %[[IF_END]]
+; CHECK:       [[IF_END]]:
+; CHECK-NEXT:    [[PHI:%.*]] = phi float [ [[ADD0]], %[[IF_THEN_SHARED]] ], [ [[ADD1]], %[[IF_THEN_PRIVATE]] ]
+; CHECK-NEXT:    ret float [[PHI]]
+;
+entry:
+  %tobool.not = icmp eq i32 %c, 0
+  br i1 %tobool.not, label %if.then.shared, label %if.then.private
+
+if.then.shared:
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  tail call void @llvm.assume(i1 %is.shared)
+  %workitem.id.x.0 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %workitem.id.x.0 to i64
+  %arrayidx0 = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %load0 = load float, ptr %arrayidx0, align 4
+  %add0 = fadd float %load0, 4.0
+  br label %if.end
+
+if.then.private:
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %p)
+  tail call void @llvm.assume(i1 %is.private)
+  %workitem.id.x.1 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom1 = zext i32 %workitem.id.x.1 to i64
+  %arrayidx1 = getelementptr inbounds float, ptr %p, i64 %idxprom1
+  %load1 = load float, ptr %arrayidx1, align 4
+  %add1 = fadd float %load1, 4.0
+  br label %if.end
+
+if.end:
+  %phi = phi float [ %add0, %if.then.shared ], [ %add1, %if.then.private ]
+  ret float %phi
+}
+
+define float @assume_func_arg_is_shared_load(ptr %flat.ptr) {
+; CHECK-LABEL: define float @assume_func_arg_is_shared_load(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_SHARED]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[FLAT_PTR]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %flat.ptr)
+  tail call void @llvm.assume(i1 %is.shared)
+  %load = load float, ptr %flat.ptr, align 4
+  ret float %load
+}
+
+define float @assume_func_arg_is_private_load(ptr %flat.ptr) {
+; CHECK-LABEL: define float @assume_func_arg_is_private_load(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_PRIVATE]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[FLAT_PTR]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %flat.ptr)
+  tail call void @llvm.assume(i1 %is.private)
+  %load = load float, ptr %flat.ptr, align 4
+  ret float %load
+}
+
+define float @assume_func_arg_is_not_shared_not_private(ptr %flat.ptr) {
+; CHECK-LABEL: define float @assume_func_arg_is_not_shared_not_private(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    [[NOT_PRIVATE:%.*]] = xor i1 [[IS_PRIVATE]], true
+; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    [[NOT_SHARED:%.*]] = xor i1 [[IS_SHARED]], true
+; CHECK-NEXT:    [[NOT_PRIVATE_AND_NOT_SHARED:%.*]] = and i1 [[NOT_PRIVATE]], [[NOT_SHARED]]
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[NOT_PRIVATE_AND_NOT_SHARED]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[FLAT_PTR]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %flat.ptr)
+  %not.private = xor i1 %is.private, true
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %flat.ptr)
+  %not.shared = xor i1 %is.shared, true
+  %not.private.and.not.shared = and i1 %not.private, %not.shared
+  tail call void @llvm.assume(i1 %not.private.and.not.shared)
+  %load = load float, ptr %flat.ptr, align 4
+  ret float %load
+}
+
+define float @assume_func_arg_is_not_private_load(ptr %flat.ptr) {
+; CHECK-LABEL: define float @assume_func_arg_is_not_private_load(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    [[NOT_IS_PRIVATE:%.*]] = xor i1 [[IS_PRIVATE]], true
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[NOT_IS_PRIVATE]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[FLAT_PTR]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %flat.ptr)
+  %not.is.private = xor i1 %is.private, true
+  tail call void @llvm.assume(i1 %not.is.private)
+  %load = load float, ptr %flat.ptr, align 4
+  ret float %load
+}
+
+define i64 @assume_func_arg_is_not_private_atomicrmw(ptr %flat.ptr, i64 %val) {
+; CHECK-LABEL: define i64 @assume_func_arg_is_not_private_atomicrmw(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]], i64 [[VAL:%.*]]) {
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[FLAT_PTR]])
+; CHECK-NEXT:    [[NOT_IS_PRIVATE:%.*]] = xor i1 [[IS_PRIVATE]], true
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[NOT_IS_PRIVATE]])
+; CHECK-NEXT:    [[RMW:%.*]] = atomicrmw sub ptr [[FLAT_PTR]], i64 [[VAL]] seq_cst, align 4
+; CHECK-NEXT:    ret i64 [[RMW]]
+;
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %flat.ptr)
+  %not.is.private = xor i1 %is.private, true
+  tail call void @llvm.assume(i1 %not.is.private)
+  %rmw = atomicrmw sub ptr %flat.ptr, i64 %val seq_cst, align 4
+  ret i64 %rmw
+}
+
+define float @contradictory_assume_after_gep_same_block(ptr %p) {
+; CHECK-LABEL: define float @contradictory_assume_after_gep_same_block(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[P]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_SHARED]])
+; CHECK-NEXT:    [[WORKITEM_ID_X:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[WORKITEM_ID_X]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(3)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[TMP1]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[TMP2]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr addrspace(3) [[GEP]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  tail call void @llvm.assume(i1 %is.shared)
+  %workitem.id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %workitem.id.x to i64
+  %gep = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %gep)
+  tail call void @llvm.assume(i1 %is.private)
+  %load = load float, ptr %gep, align 4
+  ret float %load
+}
+
+define float @contradictory_assume_argument_same_block(ptr %p) {
+; CHECK-LABEL: define float @contradictory_assume_argument_same_block(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[P]])
+; CHECK-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[P]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_SHARED]])
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[IS_PRIVATE]])
+; CHECK-NEXT:    [[WORKITEM_ID_X:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[WORKITEM_ID_X]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(3)
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[TMP1]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr addrspace(3) [[GEP]], align 4
+; CHECK-NEXT:    ret float [[LOAD]]
+;
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %p)
+  tail call void @llvm.assume(i1 %is.shared)
+  tail call void @llvm.assume(i1 %is.private)
+  %workitem.id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %workitem.id.x to i64
+  %gep = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %load = load float, ptr %gep, align 4
+  ret float %load
+}
+
 declare void @llvm.assume(i1)
 declare i1 @llvm.amdgcn.is.shared(ptr nocapture)
 declare i1 @llvm.amdgcn.is.private(ptr nocapture)
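
All of the new tests exercise the same idiom: an llvm.amdgcn.is.shared / llvm.amdgcn.is.private query fed into llvm.assume, which InferAddressSpaces uses to rewrite flat accesses into a concrete address space (or to fold contradictory assumptions to false). As a rough, hypothetical illustration only (not part of this PR), here is a minimal C sketch of how such IR can arise from source, assuming Clang's AMDGPU builtins __builtin_amdgcn_is_shared and __builtin_assume when compiling for an amdgcn target; the function name is made up:

/* Hypothetical source-level counterpart of the @assume_func_arg_is_shared_load
 * test above: Clang lowers the builtins to the llvm.amdgcn.is.shared +
 * llvm.assume pair that InferAddressSpaces keys on. */
float load_assumed_lds(const float *p) {
  /* Promise the optimizer that this flat pointer actually addresses LDS
   * (shared memory), so the load may be rewritten to ptr addrspace(3). */
  __builtin_assume(__builtin_amdgcn_is_shared(p));
  return *p;
}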

llvmbot (Member) commented Nov 12, 2025

@llvm/pr-subscribers-backend-amdgpu

darkbuck (Contributor) left a comment

LGTM

arsenm merged commit c1f18a2 into main Nov 12, 2025
13 of 14 checks passed
arsenm deleted the users/arsenm/infer-address-spaces-add-more-assume-tests branch November 12, 2025 02:26