diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td index b72f0df034012..771de01fc8d5d 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -807,6 +807,11 @@ def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ "ArrayRef<int64_t>":$shape, "Type":$elementType, CArg<"AffineMap">:$map, CArg<"Attribute", "{}">:$memorySpace)>, + /// [deprecated] `Attribute`-based form should be used instead. + TypeBuilderWithInferredContext<(ins + "ArrayRef<int64_t>":$shape, "Type":$elementType, + "AffineMap":$map, + "unsigned":$memorySpaceInd)> ]; let extraClassDeclaration = [{ using BaseMemRefType::clone; @@ -1175,6 +1180,14 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ Attribute nonDefaultMemorySpace = skipDefaultMemorySpace(memorySpace); return $_get(elementType.getContext(), elementType, nonDefaultMemorySpace); }]>, + /// [deprecated] `Attribute`-based form should be used instead. + TypeBuilderWithInferredContext<(ins "Type":$elementType, + "unsigned":$memorySpace), [{ + // Convert deprecated integer-like memory space to Attribute. + Attribute memorySpaceAttr = + wrapIntegerMemorySpace(memorySpace, elementType.getContext()); + return UnrankedMemRefType::get(elementType, memorySpaceAttr); + }]> ]; let extraClassDeclaration = [{ using BaseMemRefType::clone; diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp index df1753ac97c74..0d4ba3940c48e 100644 --- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp @@ -2097,12 +2097,10 @@ static LogicalResult generateCopy( // Check if a buffer was already created.
bool existingBuf = fastBufferMap.count(memref) > 0; if (!existingBuf) { - Attribute fastMemorySpace; - if (copyOptions.fastMemorySpace != 0) - fastMemorySpace = prologue.getI64IntegerAttr(copyOptions.fastMemorySpace); + AffineMap fastBufferLayout = b.getMultiDimIdentityMap(rank); auto fastMemRefType = MemRefType::get(fastBufferShape, memRefType.getElementType(), - MemRefLayoutAttrInterface{}, fastMemorySpace); + fastBufferLayout, copyOptions.fastMemorySpace); // Create the fast memory space buffer just before the 'affine.for' // operation. @@ -2177,12 +2175,8 @@ static LogicalResult generateCopy( } else { // DMA generation. // Create a tag (single element 1-d memref) for the DMA. - Attribute tagMemorySpace; - if (copyOptions.tagMemorySpace != 0) - tagMemorySpace = prologue.getI64IntegerAttr(copyOptions.tagMemorySpace); - auto tagMemRefType = - MemRefType::get({1}, top.getIntegerType(32), - MemRefLayoutAttrInterface{}, tagMemorySpace); + auto tagMemRefType = MemRefType::get({1}, top.getIntegerType(32), {}, + copyOptions.tagMemorySpace); auto tagMemRef = prologue.create<memref::AllocOp>(loc, tagMemRefType); SmallVector<Value, 4> tagIndices({zeroIndex}); diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp index e202bb7a2a3d0..d47e360e9dc13 100644 --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -484,6 +484,14 @@ bool mlir::detail::isSupportedMemorySpace(Attribute memorySpace) { return false; } +Attribute mlir::detail::wrapIntegerMemorySpace(unsigned memorySpace, + MLIRContext *ctx) { + if (memorySpace == 0) + return nullptr; + + return IntegerAttr::get(IntegerType::get(ctx, 64), memorySpace); +} + Attribute mlir::detail::skipDefaultMemorySpace(Attribute memorySpace) { IntegerAttr intMemorySpace = llvm::dyn_cast_or_null<IntegerAttr>(memorySpace); if (intMemorySpace && intMemorySpace.getValue() == 0) @@ -575,6 +583,46 @@ MemRefType::getChecked(function_ref<InFlightDiagnostic()> emitErrorFn, elementType, layout, memorySpace); } +MemRefType MemRefType::get(ArrayRef<int64_t> shape, Type
elementType, + AffineMap map, unsigned memorySpaceInd) { + + // Use default layout for empty map. + if (!map) + map = AffineMap::getMultiDimIdentityMap(shape.size(), + elementType.getContext()); + + // Wrap AffineMap into Attribute. + auto layout = AffineMapAttr::get(map); + + // Convert deprecated integer-like memory space to Attribute. + Attribute memorySpace = + wrapIntegerMemorySpace(memorySpaceInd, elementType.getContext()); + + return Base::get(elementType.getContext(), shape, elementType, layout, + memorySpace); +} + +MemRefType +MemRefType::getChecked(function_ref<InFlightDiagnostic()> emitErrorFn, + ArrayRef<int64_t> shape, Type elementType, AffineMap map, + unsigned memorySpaceInd) { + + // Use default layout for empty map. + if (!map) + map = AffineMap::getMultiDimIdentityMap(shape.size(), + elementType.getContext()); + + // Wrap AffineMap into Attribute. + auto layout = AffineMapAttr::get(map); + + // Convert deprecated integer-like memory space to Attribute. + Attribute memorySpace = + wrapIntegerMemorySpace(memorySpaceInd, elementType.getContext()); + + return Base::getChecked(emitErrorFn, elementType.getContext(), shape, + elementType, layout, memorySpace); +} + LogicalResult MemRefType::verify(function_ref<InFlightDiagnostic()> emitError, ArrayRef<int64_t> shape, Type elementType, MemRefLayoutAttrInterface layout, diff --git a/mlir/lib/IR/TypeDetail.h b/mlir/lib/IR/TypeDetail.h index 938cd9f2af40e..1d65fccb82b8e 100644 --- a/mlir/lib/IR/TypeDetail.h +++ b/mlir/lib/IR/TypeDetail.h @@ -140,6 +140,9 @@ struct TupleTypeStorage final /// Checks if the memorySpace has supported Attribute type. bool isSupportedMemorySpace(Attribute memorySpace); +/// Wraps deprecated integer memory space to the new Attribute form. +Attribute wrapIntegerMemorySpace(unsigned memorySpace, MLIRContext *ctx); + /// Replaces default memorySpace (integer == `0`) with empty Attribute.
Attribute skipDefaultMemorySpace(Attribute memorySpace); diff --git a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir index b0a55271a4fa2..38771f2593449 100644 --- a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir +++ b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir @@ -19,10 +19,10 @@ // CHECK-SCF-IF-DAG: #[[$TIMES2:.*]] = affine_map<()[s0] -> (s0 * 2)> // CHECK-SCF-IF-DAG: #[[$TIMES4:.*]] = affine_map<()[s0] -> (s0 * 4)> // CHECK-SCF-IF-DAG: #[[$TIMES8:.*]] = affine_map<()[s0] -> (s0 * 8)> -// CHECK-SCF-IF-DAG: memref.global "private" @__shared_32xf32 : memref<32xf32, #gpu.address_space> -// CHECK-SCF-IF-DAG: memref.global "private" @__shared_64xf32 : memref<64xf32, #gpu.address_space> -// CHECK-SCF-IF-DAG: memref.global "private" @__shared_128xf32 : memref<128xf32, #gpu.address_space> -// CHECK-SCF-IF-DAG: memref.global "private" @__shared_256xf32 : memref<256xf32, #gpu.address_space> +// CHECK-SCF-IF-DAG: memref.global "private" @__shared_32xf32 : memref<32xf32, 3> +// CHECK-SCF-IF-DAG: memref.global "private" @__shared_64xf32 : memref<64xf32, 3> +// CHECK-SCF-IF-DAG: memref.global "private" @__shared_128xf32 : memref<128xf32, 3> +// CHECK-SCF-IF-DAG: memref.global "private" @__shared_256xf32 : memref<256xf32, 3> // CHECK-SCF-IF-LABEL: func @rewrite_warp_op_to_scf_if( // CHECK-SCF-IF-SAME: %[[laneid:.*]]: index, @@ -47,8 +47,8 @@ func.func @rewrite_warp_op_to_scf_if(%laneid: index, %r:2 = gpu.warp_execute_on_lane_0(%laneid)[32] args(%v0, %v1 : vector<4xf32>, vector<8xf32>) -> (vector<1xf32>, vector<2xf32>) { ^bb0(%arg0: vector<128xf32>, %arg1: vector<256xf32>): -// CHECK-SCF-IF: %[[arg1:.*]] = vector.transfer_read %[[buffer_v1]][%[[c0]]], %{{.*}} {in_bounds = [true]} : memref<256xf32, #gpu.address_space>, vector<256xf32> -// CHECK-SCF-IF: %[[arg0:.*]] = vector.transfer_read %[[buffer_v0]][%[[c0]]], %{{.*}} {in_bounds = [true]} : memref<128xf32, #gpu.address_space>, vector<128xf32> +// 
CHECK-SCF-IF: %[[arg1:.*]] = vector.transfer_read %[[buffer_v1]][%[[c0]]], %{{.*}} {in_bounds = [true]} : memref<256xf32, 3>, vector<256xf32> +// CHECK-SCF-IF: %[[arg0:.*]] = vector.transfer_read %[[buffer_v0]][%[[c0]]], %{{.*}} {in_bounds = [true]} : memref<128xf32, 3>, vector<128xf32> // CHECK-SCF-IF: %[[def_0:.*]] = "some_def"(%[[arg0]]) : (vector<128xf32>) -> vector<32xf32> // CHECK-SCF-IF: %[[def_1:.*]] = "some_def"(%[[arg1]]) : (vector<256xf32>) -> vector<64xf32> %2 = "some_def"(%arg0) : (vector<128xf32>) -> vector<32xf32> @@ -60,8 +60,8 @@ func.func @rewrite_warp_op_to_scf_if(%laneid: index, // CHECK-SCF-IF: } // CHECK-SCF-IF: gpu.barrier // CHECK-SCF-IF: %[[o1:.*]] = affine.apply #[[$TIMES2]]()[%[[laneid]]] -// CHECK-SCF-IF: %[[r1:.*]] = vector.transfer_read %[[buffer_def_1]][%[[o1]]], %{{.*}} {in_bounds = [true]} : memref<64xf32, #gpu.address_space>, vector<2xf32> -// CHECK-SCF-IF: %[[r0:.*]] = vector.transfer_read %[[buffer_def_0]][%[[laneid]]], %{{.*}} {in_bounds = [true]} : memref<32xf32, #gpu.address_space>, vector<1xf32> +// CHECK-SCF-IF: %[[r1:.*]] = vector.transfer_read %[[buffer_def_1]][%[[o1]]], %{{.*}} {in_bounds = [true]} : memref<64xf32, 3>, vector<2xf32> +// CHECK-SCF-IF: %[[r0:.*]] = vector.transfer_read %[[buffer_def_0]][%[[laneid]]], %{{.*}} {in_bounds = [true]} : memref<32xf32, 3>, vector<1xf32> // CHECK-SCF-IF: "some_use"(%[[r0]]) : (vector<1xf32>) -> () // CHECK-SCF-IF: "some_use"(%[[r1]]) : (vector<2xf32>) -> () "some_use"(%r#0) : (vector<1xf32>) -> () @@ -1065,18 +1065,18 @@ func.func @warp_execute_has_broadcast_semantics(%laneid: index, %s0: f32, %v0: v args(%s0, %v0, %v1, %v2 : f32, vector, vector<1xf32>, vector<1x1xf32>) -> (f32, vector, vector<1xf32>, vector<1x1xf32>) { ^bb0(%bs0: f32, %bv0: vector, %bv1: vector<1xf32>, %bv2: vector<1x1xf32>): - // CHECK-SCF-IF: vector.transfer_read {{.*}}[%[[C0]], %[[C0]]]{{.*}} {in_bounds = [true, true]} : memref<1x1xf32, #gpu.address_space>, vector<1x1xf32> - // CHECK-SCF-IF: 
vector.transfer_read {{.*}}[%[[C0]]]{{.*}} {in_bounds = [true]} : memref<1xf32, #gpu.address_space>, vector<1xf32> - // CHECK-SCF-IF: vector.transfer_read {{.*}}[]{{.*}} : memref>, vector - // CHECK-SCF-IF: memref.load {{.*}}[%[[C0]]] : memref<1xf32, #gpu.address_space> + // CHECK-SCF-IF: vector.transfer_read {{.*}}[%[[C0]], %[[C0]]]{{.*}} {in_bounds = [true, true]} : memref<1x1xf32, 3>, vector<1x1xf32> + // CHECK-SCF-IF: vector.transfer_read {{.*}}[%[[C0]]]{{.*}} {in_bounds = [true]} : memref<1xf32, 3>, vector<1xf32> + // CHECK-SCF-IF: vector.transfer_read {{.*}}[]{{.*}} : memref, vector + // CHECK-SCF-IF: memref.load {{.*}}[%[[C0]]] : memref<1xf32, 3> // CHECK-SCF-IF: "some_def_0"(%{{.*}}) : (f32) -> f32 // CHECK-SCF-IF: "some_def_1"(%{{.*}}) : (vector) -> vector // CHECK-SCF-IF: "some_def_1"(%{{.*}}) : (vector<1xf32>) -> vector<1xf32> // CHECK-SCF-IF: "some_def_1"(%{{.*}}) : (vector<1x1xf32>) -> vector<1x1xf32> - // CHECK-SCF-IF: memref.store {{.*}}[%[[C0]]] : memref<1xf32, #gpu.address_space> - // CHECK-SCF-IF: vector.transfer_write {{.*}}[] : vector, memref> - // CHECK-SCF-IF: vector.transfer_write {{.*}}[%[[C0]]] {in_bounds = [true]} : vector<1xf32>, memref<1xf32, #gpu.address_space> - // CHECK-SCF-IF: vector.transfer_write {{.*}}[%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, memref<1x1xf32, #gpu.address_space> + // CHECK-SCF-IF: memref.store {{.*}}[%[[C0]]] : memref<1xf32, 3> + // CHECK-SCF-IF: vector.transfer_write {{.*}}[] : vector, memref + // CHECK-SCF-IF: vector.transfer_write {{.*}}[%[[C0]]] {in_bounds = [true]} : vector<1xf32>, memref<1xf32, 3> + // CHECK-SCF-IF: vector.transfer_write {{.*}}[%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, memref<1x1xf32, 3> %rs0 = "some_def_0"(%bs0) : (f32) -> f32 %rv0 = "some_def_1"(%bv0) : (vector) -> vector @@ -1088,10 +1088,10 @@ func.func @warp_execute_has_broadcast_semantics(%laneid: index, %s0: f32, %v0: v } // CHECK-SCF-IF: gpu.barrier - // CHECK-SCF-IF: %[[RV2:.*]] = 
vector.transfer_read {{.*}}[%[[C0]], %[[C0]]]{{.*}} {in_bounds = [true, true]} : memref<1x1xf32, #gpu.address_space>, vector<1x1xf32> - // CHECK-SCF-IF: %[[RV1:.*]] = vector.transfer_read {{.*}}[%[[C0]]]{{.*}} {in_bounds = [true]} : memref<1xf32, #gpu.address_space>, vector<1xf32> - // CHECK-SCF-IF: %[[RV0:.*]] = vector.transfer_read {{.*}}[]{{.*}} : memref>, vector - // CHECK-SCF-IF: %[[RS0:.*]] = memref.load {{.*}}[%[[C0]]] : memref<1xf32, #gpu.address_space> + // CHECK-SCF-IF: %[[RV2:.*]] = vector.transfer_read {{.*}}[%[[C0]], %[[C0]]]{{.*}} {in_bounds = [true, true]} : memref<1x1xf32, 3>, vector<1x1xf32> + // CHECK-SCF-IF: %[[RV1:.*]] = vector.transfer_read {{.*}}[%[[C0]]]{{.*}} {in_bounds = [true]} : memref<1xf32, 3>, vector<1xf32> + // CHECK-SCF-IF: %[[RV0:.*]] = vector.transfer_read {{.*}}[]{{.*}} : memref, vector + // CHECK-SCF-IF: %[[RS0:.*]] = memref.load {{.*}}[%[[C0]]] : memref<1xf32, 3> // CHECK-SCF-IF: return %[[RS0]], %[[RV0]], %[[RV1]], %[[RV2]] : f32, vector, vector<1xf32>, vector<1x1xf32> return %r#0, %r#1, %r#2, %r#3 : f32, vector, vector<1xf32>, vector<1x1xf32> } @@ -1106,9 +1106,9 @@ func.func @warp_execute_nd_distribute(%laneid: index, %v0: vector<1x64x1xf32>, % -> (vector<1x64x1xf32>, vector<1x2x128xf32>) { // CHECK-SCF-IF-DAG: %[[C0:.*]] = arith.constant 0 : index - // CHECK-SCF-IF: vector.transfer_write %{{.*}}, %{{.*}}[%[[LANEID]], %c0, %c0] {in_bounds = [true, true, true]} : vector<1x64x1xf32>, memref<32x64x1xf32, #gpu.address_space> + // CHECK-SCF-IF: vector.transfer_write %{{.*}}, %{{.*}}[%[[LANEID]], %c0, %c0] {in_bounds = [true, true, true]} : vector<1x64x1xf32>, memref<32x64x1xf32, 3> // CHECK-SCF-IF: %[[RID:.*]] = affine.apply #[[$TIMES2]]()[%[[LANEID]]] - // CHECK-SCF-IF: vector.transfer_write %{{.*}}, %{{.*}}[%[[C0]], %[[RID]], %[[C0]]] {in_bounds = [true, true, true]} : vector<1x2x128xf32>, memref<1x64x128xf32, #gpu.address_space> + // CHECK-SCF-IF: vector.transfer_write %{{.*}}, %{{.*}}[%[[C0]], %[[RID]], %[[C0]]] {in_bounds = 
[true, true, true]} : vector<1x2x128xf32>, memref<1x64x128xf32, 3> // CHECK-SCF-IF: gpu.barrier // CHECK-SCF-IF: scf.if{{.*}}{ @@ -1116,12 +1116,12 @@ func.func @warp_execute_nd_distribute(%laneid: index, %v0: vector<1x64x1xf32>, % args(%v0, %v1 : vector<1x64x1xf32>, vector<1x2x128xf32>) -> (vector<1x64x1xf32>, vector<1x2x128xf32>) { ^bb0(%arg0: vector<32x64x1xf32>, %arg1: vector<1x64x128xf32>): - // CHECK-SCF-IF-DAG: %[[SR0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : memref<32x64x1xf32, #gpu.address_space>, vector<32x64x1xf32> - // CHECK-SCF-IF-DAG: %[[SR1:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : memref<1x64x128xf32, #gpu.address_space>, vector<1x64x128xf32> + // CHECK-SCF-IF-DAG: %[[SR0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : memref<32x64x1xf32, 3>, vector<32x64x1xf32> + // CHECK-SCF-IF-DAG: %[[SR1:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : memref<1x64x128xf32, 3>, vector<1x64x128xf32> // CHECK-SCF-IF: %[[W0:.*]] = "some_def_0"(%[[SR0]]) : (vector<32x64x1xf32>) -> vector<32x64x1xf32> // CHECK-SCF-IF: %[[W1:.*]] = "some_def_1"(%[[SR1]]) : (vector<1x64x128xf32>) -> vector<1x64x128xf32> - // CHECK-SCF-IF-DAG: vector.transfer_write %[[W0]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<32x64x1xf32>, memref<32x64x1xf32, #gpu.address_space> - // CHECK-SCF-IF-DAG: vector.transfer_write %[[W1]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<1x64x128xf32>, memref<1x64x128xf32, #gpu.address_space> + // CHECK-SCF-IF-DAG: vector.transfer_write %[[W0]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<32x64x1xf32>, memref<32x64x1xf32, 3> + // CHECK-SCF-IF-DAG: vector.transfer_write %[[W1]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] 
{in_bounds = [true, true, true]} : vector<1x64x128xf32>, memref<1x64x128xf32, 3> %r0 = "some_def_0"(%arg0) : (vector<32x64x1xf32>) -> vector<32x64x1xf32> %r1 = "some_def_1"(%arg1) : (vector<1x64x128xf32>) -> vector<1x64x128xf32> @@ -1132,8 +1132,8 @@ func.func @warp_execute_nd_distribute(%laneid: index, %v0: vector<1x64x1xf32>, % // CHECK-SCF-IF: gpu.barrier // CHECK-SCF-IF: %[[WID:.*]] = affine.apply #[[$TIMES2]]()[%[[LANEID]]] - // CHECK-SCF-IF-DAG: %[[R0:.*]] = vector.transfer_read %{{.*}}[%[[LANEID]], %[[C0]], %[[C0]]], %cst {in_bounds = [true, true, true]} : memref<32x64x1xf32, #gpu.address_space>, vector<1x64x1xf32> - // CHECK-SCF-IF-DAG: %[[R1:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[WID]], %[[C0]]], %cst {in_bounds = [true, true, true]} : memref<1x64x128xf32, #gpu.address_space>, vector<1x2x128xf32> + // CHECK-SCF-IF-DAG: %[[R0:.*]] = vector.transfer_read %{{.*}}[%[[LANEID]], %[[C0]], %[[C0]]], %cst {in_bounds = [true, true, true]} : memref<32x64x1xf32, 3>, vector<1x64x1xf32> + // CHECK-SCF-IF-DAG: %[[R1:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[WID]], %[[C0]]], %cst {in_bounds = [true, true, true]} : memref<1x64x128xf32, 3>, vector<1x2x128xf32> // CHECK-SCF-IF: return %[[R0]], %[[R1]] : vector<1x64x1xf32>, vector<1x2x128xf32> return %r#0, %r#1 : vector<1x64x1xf32>, vector<1x2x128xf32> } diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp index b73c40adcffa7..eda2594fbc7c7 100644 --- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp +++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp @@ -518,17 +518,15 @@ struct TestVectorScanLowering static Value allocateGlobalSharedMemory(Location loc, OpBuilder &builder, gpu::WarpExecuteOnLane0Op warpOp, Type type) { - Attribute sharedMemorySpaceAttr = - builder.getAttr(gpu::AddressSpace::Workgroup); + static constexpr int64_t kSharedMemorySpace = 3; // Compute type of shared memory buffer. 
MemRefType memrefType; if (auto vectorType = dyn_cast<VectorType>(type)) { memrefType = - MemRefType::get(vectorType.getShape(), vectorType.getElementType(), - MemRefLayoutAttrInterface{}, sharedMemorySpaceAttr); + MemRefType::get(vectorType.getShape(), vectorType.getElementType(), {}, + kSharedMemorySpace); } else { - memrefType = MemRefType::get({1}, type, MemRefLayoutAttrInterface{}, - sharedMemorySpaceAttr); + memrefType = MemRefType::get({1}, type, {}, kSharedMemorySpace); } // Get symbol table holding all shared memory globals.