diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index d57005237187e6..841da97d03f997 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -945,13 +945,11 @@ MemRefType mlir::canonicalizeStridedLayout(MemRefType t) {
 AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
                                                 ArrayRef<AffineExpr> exprs,
                                                 MLIRContext *context) {
-  assert(!sizes.empty() && !exprs.empty() &&
-         "expected non-empty sizes and exprs");
-
   // Size 0 corner case is useful for canonicalizations.
-  if (llvm::is_contained(sizes, 0))
+  if (sizes.empty() || llvm::is_contained(sizes, 0))
     return getAffineConstantExpr(0, context);
 
+  assert(!exprs.empty() && "expected exprs");
   auto maps = AffineMap::inferFromExprList(exprs);
   assert(!maps.empty() && "Expected one non-empty map");
   unsigned numDims = maps[0].getNumDims(), nSymbols = maps[0].getNumSymbols();
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 8b215769e0751d..84fcea403227ea 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -1052,6 +1052,20 @@ func.func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
 
 // -----
 
+// CHECK-LABEL: func @memref_copy_0d_offset
+#map0 = affine_map<(d0) -> (d0 + 1)>
+#map1 = affine_map<() -> (1)>
+func.func @memref_copy_0d_offset(%in: memref<2xi32>) {
+  %buf = memref.alloc() : memref<i32>
+  %sub = memref.subview %in[1] [1] [1] : memref<2xi32> to memref<1xi32, #map0>
+  %scalar = memref.collapse_shape %sub [] : memref<1xi32, #map0> into memref<i32, #map1>
+  memref.copy %scalar, %buf : memref<i32, #map1> to memref<i32>
+  // CHECK: llvm.intr.memcpy
+  return
+}
+
+// -----
+
 // CHECK-LABEL: func @memref_copy_noncontiguous
 #map = affine_map<(d0, d1)[s0] -> (d0 * 2 + s0 + d1)>
 func.func @memref_copy_noncontiguous(%in: memref<16x2xi32>, %offset: index) {