diff --git a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
index aec3ea294ac6c..dcb5f42902ee6 100644
--- a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
@@ -58,11 +58,13 @@ struct CUFComputeSharedMemoryOffsetsAndSize
     auto gpuMod = cuf::getOrCreateGPUModule(mod, symTab);
     mlir::Type i8Ty = builder.getI8Type();
     mlir::Type i32Ty = builder.getI32Type();
+    mlir::Type idxTy = builder.getIndexType();
     for (auto funcOp : gpuMod.getOps<mlir::gpu::GPUFuncOp>()) {
       unsigned nbDynamicSharedVariables = 0;
       unsigned nbStaticSharedVariables = 0;
       uint64_t sharedMemSize = 0;
       unsigned short alignment = 0;
+      mlir::Value crtDynOffset;
 
       // Go over each shared memory operation and compute their start offset and
       // the size and alignment of the global to be generated if all variables
@@ -73,16 +75,30 @@ struct CUFComputeSharedMemoryOffsetsAndSize
         builder.setInsertionPoint(sharedOp);
         if (fir::hasDynamicSize(sharedOp.getInType())) {
           mlir::Type ty = sharedOp.getInType();
-          // getTypeSizeAndAlignmentOrCrash will crash trying to compute the
-          // size of an array with dynamic size. Just get the alignment to
-          // create the global.
           if (auto seqTy = mlir::dyn_cast<fir::SequenceType>(ty))
             ty = seqTy.getEleTy();
           unsigned short align = dl->getTypeABIAlignment(ty);
-          ++nbDynamicSharedVariables;
-          mlir::Value zero = builder.createIntegerConstant(loc, i32Ty, 0);
-          sharedOp.getOffsetMutable().assign(zero);
           alignment = std::max(alignment, align);
+          uint64_t tySize = dl->getTypeSize(ty);
+          ++nbDynamicSharedVariables;
+          if (crtDynOffset) {
+            sharedOp.getOffsetMutable().assign(
+                builder.createConvert(loc, i32Ty, crtDynOffset));
+          } else {
+            mlir::Value zero = builder.createIntegerConstant(loc, i32Ty, 0);
+            sharedOp.getOffsetMutable().assign(zero);
+          }
+
+          mlir::Value dynSize =
+              builder.createIntegerConstant(loc, idxTy, tySize);
+          for (auto extent : sharedOp.getShape())
+            dynSize = builder.create<mlir::arith::MulIOp>(loc, dynSize, extent);
+          if (crtDynOffset)
+            crtDynOffset =
+                builder.create<mlir::arith::AddIOp>(loc, crtDynOffset, dynSize);
+          else
+            crtDynOffset = dynSize;
+
           continue;
         }
         auto [size, align] = fir::getTypeSizeAndAlignmentOrCrash(
diff --git a/flang/test/Fir/CUDA/cuda-shared-offset.mlir b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
index 1eea75c802204..5e9aac4e71438 100644
--- a/flang/test/Fir/CUDA/cuda-shared-offset.mlir
+++ b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
@@ -54,3 +54,56 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<
 // CHECK: fir.global internal @{{.*}}__shared_mem(dense<0> : vector<28xi8>) {alignment = 8 : i64, data_attr = #cuf.cuda<shared>} : !fir.array<28xi8>
 // CHECK: }
 // CHECK: }
+
+// -----
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
+  gpu.module @cuda_device_mod {
+    gpu.func @_QMmPshareddyn(%arg0: !fir.box<!fir.array<?x?xf32>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "a"}, %arg1: !fir.box<!fir.array<?x?xf32>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "b"}, %arg2: i32 {fir.bindc_name = "k"}) attributes {cuf.proc_attr = #cuf.cuda_proc<global>} {
+      %c1_i32 = arith.constant 1 : i32
+      %c2_i32 = arith.constant 2 : i32
+      %c0 = arith.constant 0 : index
+      %5 = fir.address_of(@_QM__fortran_builtinsE__builtin_blockdim) : !fir.ref<!fir.type<_QM__fortran_builtinsT__builtin_dim3{x:i32,y:i32,z:i32}>>
+      %6 = fir.declare %5 {uniq_name = "_QM__fortran_builtinsE__builtin_blockdim"} : (!fir.ref<!fir.type<_QM__fortran_builtinsT__builtin_dim3{x:i32,y:i32,z:i32}>>) -> !fir.ref<!fir.type<_QM__fortran_builtinsT__builtin_dim3{x:i32,y:i32,z:i32}>>
+      %15 = fir.alloca i32
+      %16 = fir.declare %15 {fortran_attrs = #fir.var_attrs<value>, uniq_name = "_QMmFss1Ek"} : (!fir.ref<i32>) -> !fir.ref<i32>
+      %27 = fir.coordinate_of %6, x : (!fir.ref<!fir.type<_QM__fortran_builtinsT__builtin_dim3{x:i32,y:i32,z:i32}>>) -> !fir.ref<i32>
+      %28 = fir.load %27 : !fir.ref<i32>
+      %29 = fir.convert %28 : (i32) -> i64
+      %30 = fir.convert %29 : (i64) -> index
+      %31 = arith.cmpi sgt, %30, %c0 : index
+      %32 = arith.select %31, %30, %c0 : index
+      %33 = fir.coordinate_of %6, y : (!fir.ref<!fir.type<_QM__fortran_builtinsT__builtin_dim3{x:i32,y:i32,z:i32}>>) -> !fir.ref<i32>
+      %34 = fir.load %33 : !fir.ref<i32>
+      %35 = fir.convert %34 : (i32) -> i64
+      %36 = fir.convert %35 : (i64) -> index
+      %37 = arith.cmpi sgt, %36, %c0 : index
+      %38 = arith.select %37, %36, %c0 : index
+      %39 = cuf.shared_memory !fir.array<?x?xf32>, %32, %38 : index, index {bindc_name = "s1", uniq_name = "_QMmFss1Es1"} -> !fir.ref<!fir.array<?x?xf32>>
+      %40 = fir.shape %32, %38 : (index, index) -> !fir.shape<2>
+      %41 = fir.declare %39(%40) {data_attr = #cuf.cuda<shared>, uniq_name = "_QMmFss1Es1"} : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.ref<!fir.array<?x?xf32>>
+      %42 = fir.load %16 : !fir.ref<i32>
+      %43 = arith.muli %42, %c2_i32 : i32
+      %44 = fir.convert %43 : (i32) -> i64
+      %45 = fir.convert %44 : (i64) -> index
+      %46 = arith.cmpi sgt, %45, %c0 : index
+      %47 = arith.select %46, %45, %c0 : index
+      %48 = fir.load %16 : !fir.ref<i32>
+      %49 = fir.convert %48 : (i32) -> i64
+      %50 = fir.convert %49 : (i64) -> index
+      %51 = arith.cmpi sgt, %50, %c0 : index
+      %52 = arith.select %51, %50, %c0 : index
+      %53 = cuf.shared_memory !fir.array<?x?xf32>, %47, %52 : index, index {bindc_name = "s2", uniq_name = "_QMmFss1Es2"} -> !fir.ref<!fir.array<?x?xf32>>
+      gpu.return
+    }
+  }
+}
+
+// CHECK: gpu.func @_QMmPshareddyn(%arg0: !fir.box<!fir.array<?x?xf32>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "a"}, %arg1: !fir.box<!fir.array<?x?xf32>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "b"}, %arg2: i32 {fir.bindc_name = "k"}) attributes {cuf.proc_attr = #cuf.cuda_proc<global>} {
+// CHECK: %[[EXTENT0:.*]] = arith.select
+// CHECK: %[[EXTENT1:.*]] = arith.select
+// CHECK: %[[SIZE_EXTENT:.*]] = arith.muli %c4{{.*}}, %[[EXTENT0]] : index
+// CHECK: %[[DYNSIZE:.*]] = arith.muli %[[SIZE_EXTENT]], %[[EXTENT1]] : index
+// CHECK: cuf.shared_memory[%c0{{.*}} : i32] !fir.array<?x?xf32>, %9, %15 : index, index {bindc_name = "s1", uniq_name = "_QMmFss1Es1"} -> !fir.ref<!fir.array<?x?xf32>>
+// CHECK: %[[CONV_DYNSIZE:.*]] = fir.convert %[[DYNSIZE]] : (index) -> i32
+// CHECK: cuf.shared_memory[%[[CONV_DYNSIZE]] : i32] !fir.array<?x?xf32>, %26, %31 : index, index {bindc_name = "s2", uniq_name = "_QMmFss1Es2"} -> !fir.ref<!fir.array<?x?xf32>>
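
For intuition, the layout the updated pass computes is just a running byte offset over the dynamically sized shared variables: each `cuf.shared_memory` op is assigned the current offset, and the offset then advances by the variable's element size times the product of its extents (the `arith.muli`/`arith.addi` chain checked in the test above). Below is a minimal standalone sketch of that accumulation, assuming compile-time extents in place of the runtime `index` values; `DynShared` and `assignOffsets` are illustrative names, not part of the patch.

```cpp
#include <cstdint>
#include <vector>

// Illustrative stand-in for one dynamically sized shared variable:
// dl->getTypeSize(ty) becomes eleSize, sharedOp.getShape() becomes extents.
struct DynShared {
  uint64_t eleSize;               // element size in bytes
  std::vector<uint64_t> extents;  // array extents (runtime values in the pass)
};

// Mirrors the crtDynOffset accumulation: the first variable lands at
// offset 0, each following one right after the previous variable's bytes.
std::vector<uint64_t> assignOffsets(const std::vector<DynShared> &vars) {
  std::vector<uint64_t> offsets;
  uint64_t crtDynOffset = 0;
  for (const DynShared &v : vars) {
    offsets.push_back(crtDynOffset);  // sharedOp.getOffsetMutable().assign(...)
    uint64_t dynSize = v.eleSize;
    for (uint64_t extent : v.extents)
      dynSize *= extent;              // the arith.muli chain over the shape
    crtDynOffset += dynSize;          // the arith.addi onto the running offset
  }
  return offsets;
}
```

For the test's two `?x?xf32` arrays, `assignOffsets({{4, {nx, ny}}, {4, {2 * k, k}}})` would place s1 at 0 and s2 at `4 * nx * ny`, which is exactly the `%[[CONV_DYNSIZE]]` offset the CHECK lines expect. Note that, as in the patch, no padding is inserted between consecutive dynamic variables; alignment is only accumulated for creating the backing global.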