[mlir] fix overflow in LLVM dialect inlining #72878

Merged: 2 commits into llvm:main from dataflow-inline, Nov 21, 2023

Conversation

@ftynse ftynse (Member) commented Nov 20, 2023

Don't use unsigned for sizes as they may be larger than that type can hold.

Fixes #72678.
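
For illustration only (not part of the patch), a minimal standalone C++ sketch of the wraparound this change avoids, assuming a 32-bit unsigned as on typical LLVM host platforms: a byte count above 2^32 - 1 silently wraps when narrowed to unsigned, whereas uint64_t preserves it.

#include <cstdint>
#include <cstdio>

int main() {
  // A type size larger than a 32-bit unsigned can represent.
  uint64_t typeSize = 8'000'000'000ull; // > UINT32_MAX (4'294'967'295)

  // Narrowing to a 32-bit unsigned wraps modulo 2^32 and silently
  // yields a much smaller size.
  unsigned truncated = static_cast<unsigned>(typeSize);

  std::printf("uint64_t size: %llu\n",
              static_cast<unsigned long long>(typeSize)); // 8000000000
  std::printf("unsigned size: %u\n", truncated);          // 3705032704
  return 0;
}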

@gysit gysit (Contributor) left a comment

LGTM

I guess tryToEnforceAlignment and tryToEnforceAllocaAlignment should probably also use uint64_t for consistency?

mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp (review thread resolved)
Don't use unsigned for sizes as they may be larger than that type can
hold.

Fixes llvm#72678.
@ftynse ftynse marked this pull request as ready for review November 21, 2023 15:25
@llvmbot llvmbot (Collaborator) commented Nov 21, 2023

@llvm/pr-subscribers-mlir-llvm

@llvm/pr-subscribers-mlir

Author: Oleksandr "Alex" Zinenko (ftynse)

Changes

Don't use unsigned for sizes as they may be larger than that type can hold.

Fixes #72678.


Full diff: https://github.com/llvm/llvm-project/pull/72878.diff

2 Files Affected:

  • (modified) mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp (+12-12)
  • (modified) mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir (+18-1)
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
index 2abb9b0e3986872..d9f52d7692938f0 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
@@ -496,14 +496,14 @@ static void handleAccessGroups(Operation *call,
 /// If `requestedAlignment` is higher than the alignment specified on `alloca`,
 /// realigns `alloca` if this does not exceed the natural stack alignment.
 /// Returns the post-alignment of `alloca`, whether it was realigned or not.
-static unsigned tryToEnforceAllocaAlignment(LLVM::AllocaOp alloca,
-                                            unsigned requestedAlignment,
+static uint64_t tryToEnforceAllocaAlignment(LLVM::AllocaOp alloca,
+                                            uint64_t requestedAlignment,
                                             DataLayout const &dataLayout) {
-  unsigned allocaAlignment = alloca.getAlignment().value_or(1);
+  uint64_t allocaAlignment = alloca.getAlignment().value_or(1);
   if (requestedAlignment <= allocaAlignment)
     // No realignment necessary.
     return allocaAlignment;
-  unsigned naturalStackAlignmentBits = dataLayout.getStackAlignment();
+  uint64_t naturalStackAlignmentBits = dataLayout.getStackAlignment();
   // If the natural stack alignment is not specified, the data layout returns
   // zero. Optimistically allow realignment in this case.
   if (naturalStackAlignmentBits == 0 ||
@@ -525,7 +525,7 @@ static unsigned tryToEnforceAllocaAlignment(LLVM::AllocaOp alloca,
 /// the pointer, then returns the resulting post-alignment, regardless of
 /// whether it was realigned or not. If no existing alignment attribute is
 /// found, returns 1 (i.e., assume that no alignment is guaranteed).
-static unsigned tryToEnforceAlignment(Value value, unsigned requestedAlignment,
+static uint64_t tryToEnforceAlignment(Value value, uint64_t requestedAlignment,
                                       DataLayout const &dataLayout) {
   if (Operation *definingOp = value.getDefiningOp()) {
     if (auto alloca = dyn_cast<LLVM::AllocaOp>(definingOp))
@@ -557,8 +557,8 @@ static unsigned tryToEnforceAlignment(Value value, unsigned requestedAlignment,
 /// the address of the new alloca, then returns the value of the new alloca.
 static Value handleByValArgumentInit(OpBuilder &builder, Location loc,
                                      Value argument, Type elementType,
-                                     unsigned elementTypeSize,
-                                     unsigned targetAlignment) {
+                                     uint64_t elementTypeSize,
+                                     uint64_t targetAlignment) {
   // Allocate the new value on the stack.
   Value allocaOp;
   {
@@ -587,7 +587,7 @@ static Value handleByValArgumentInit(OpBuilder &builder, Location loc,
 /// attribute (or 1 if no align attribute was set).
 static Value handleByValArgument(OpBuilder &builder, Operation *callable,
                                  Value argument, Type elementType,
-                                 unsigned requestedAlignment) {
+                                 uint64_t requestedAlignment) {
   auto func = cast<LLVM::LLVMFuncOp>(callable);
   LLVM::MemoryEffectsAttr memoryEffects = func.getMemoryAttr();
   // If there is no memory effects attribute, assume that the function is
@@ -597,16 +597,16 @@ static Value handleByValArgument(OpBuilder &builder, Operation *callable,
                     memoryEffects.getArgMem() != LLVM::ModRefInfo::Mod;
   // Check if there's an alignment mismatch requiring us to copy.
   DataLayout dataLayout = DataLayout::closest(callable);
-  unsigned minimumAlignment = dataLayout.getTypeABIAlignment(elementType);
+  uint64_t minimumAlignment = dataLayout.getTypeABIAlignment(elementType);
   if (isReadOnly) {
     if (requestedAlignment <= minimumAlignment)
       return argument;
-    unsigned currentAlignment =
+    uint64_t currentAlignment =
         tryToEnforceAlignment(argument, requestedAlignment, dataLayout);
     if (currentAlignment >= requestedAlignment)
       return argument;
   }
-  unsigned targetAlignment = std::max(requestedAlignment, minimumAlignment);
+  uint64_t targetAlignment = std::max(requestedAlignment, minimumAlignment);
   return handleByValArgumentInit(builder, func.getLoc(), argument, elementType,
                                  dataLayout.getTypeSize(elementType),
                                  targetAlignment);
@@ -753,7 +753,7 @@ struct LLVMInlinerInterface : public DialectInlinerInterface {
     if (std::optional<NamedAttribute> attr =
             argumentAttrs.getNamed(LLVM::LLVMDialect::getByValAttrName())) {
       Type elementType = cast<TypeAttr>(attr->getValue()).getValue();
-      unsigned requestedAlignment = 1;
+      uint64_t requestedAlignment = 1;
       if (std::optional<NamedAttribute> alignAttr =
               argumentAttrs.getNamed(LLVM::LLVMDialect::getAlignAttrName())) {
         requestedAlignment = cast<IntegerAttr>(alignAttr->getValue())
diff --git a/mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir b/mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir
index 3e246d7f21d5f88..ef39b96b2736617 100644
--- a/mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir
+++ b/mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -inline %s | FileCheck %s
+// RUN: mlir-opt -inline --split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: @byval_2_000_000_000
 llvm.func @byval_2_000_000_000(%ptr : !llvm.ptr) {
@@ -12,3 +12,20 @@ llvm.func @byval_2_000_000_000(%ptr : !llvm.ptr) {
 llvm.func @with_byval_arg(%ptr : !llvm.ptr { llvm.byval = !llvm.array<1000 x array<1000 x array <500 x i32>>> }) {
   llvm.return
 }
+
+// -----
+
+// This exceeds representational capacity of 32-bit unsigned value.
+
+// CHECK-LABEL: @byval_8_000_000_000
+llvm.func @byval_8_000_000_000(%ptr : !llvm.ptr) {
+  // CHECK: %[[SIZE:.+]] = llvm.mlir.constant(8000000000 : i64)
+  // CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %[[SIZE]]
+
+  llvm.call @with_byval_arg(%ptr) : (!llvm.ptr) -> ()
+  llvm.return
+}
+
+llvm.func @with_byval_arg(%ptr : !llvm.ptr { llvm.byval = !llvm.array<2000 x array<2000 x array <500 x i32>>> }) {
+  llvm.return
+}
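
As context for the two test cases (an illustrative check, not part of the patch): the existing test's byval type holds 1000 x 1000 x 500 i32 values, i.e. 2,000,000,000 bytes, which still fits a 32-bit unsigned, while the new one holds 2000 x 2000 x 500 i32 values, i.e. 8,000,000,000 bytes, which does not, so the memcpy size constant has to be emitted as an i64.

#include <cstdint>

// Byte sizes of the byval types used in inline-byval-huge.mlir.
constexpr uint64_t kExistingCase = 1000ull * 1000 * 500 * sizeof(int32_t); // 2'000'000'000
constexpr uint64_t kNewCase = 2000ull * 2000 * 500 * sizeof(int32_t);      // 8'000'000'000

// The existing case fits a 32-bit unsigned; the new one requires 64 bits.
static_assert(kExistingCase <= UINT32_MAX, "fits in 32 bits");
static_assert(kNewCase > UINT32_MAX, "needs 64 bits");

int main() { return 0; }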

@ftynse ftynse merged commit 8169c15 into llvm:main Nov 21, 2023
6 checks passed
@ftynse ftynse deleted the dataflow-inline branch November 21, 2023 17:27
Linked issue #72678: mlir::DataLayout usage of unsigned is too small for applications with huge arrays