
Commit bf812ea

[mlir][linalg] remove the -now- obsolete sparse support in linalg
All glue and clutter in the linalg ops has been replaced by proper sparse
tensor type encoding. This code is no longer needed. Thanks to ntv@ for
giving us a temporary home in linalg. So long, and thanks for all the fish.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D102098
1 parent 22d295f · commit bf812ea

7 files changed (+16, -105 lines)
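For context, the op-level `sparse` annotation removed below was superseded by a sparsity encoding carried on the tensor type itself. A minimal sketch, assuming the `sparse_tensor` dialect syntax of roughly this era (the encoding fields have since been renamed); the alias name is illustrative only:

    // Hypothetical alias: the encoding is a property of the tensor type,
    // so linalg ops no longer need a "sparse" ArrayAttr of their own.
    #CSR = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ]
    }>

    // Kernels then operate on values such as tensor<?x?xf64, #CSR>, and the
    // sparse compiler reads the per-dimension levels from the type.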


mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td

Lines changed: 0 additions & 13 deletions
@@ -1000,19 +1000,6 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
         });
       }]
     >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return whether the op has sparse tensor semantics.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasSparseSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        return $_op->getAttr(getSparseAttrName()).
-            template dyn_cast_or_null<ArrayAttr>() != nullptr;
-      }]
-    >,
     InterfaceMethod<
       /*desc=*/[{
         Return the name registered for this op when lowering to an external

mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td

Lines changed: 1 addition & 5 deletions
@@ -527,9 +527,7 @@ class GenericOpBase<string mnemonic> : LinalgStructuredBase_Op<mnemonic, [
                        AffineMapArrayAttr:$indexing_maps,
                        ArrayAttr:$iterator_types,
                        OptionalAttr<StrAttr>:$doc,
-                       OptionalAttr<StrAttr>:$library_call,
-                       // ArrayAttr of StrArrayAttr:
-                       OptionalAttr<ArrayAttr>:$sparse);
+                       OptionalAttr<StrAttr>:$library_call);
   let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
   let regions = (region AnyRegion:$region);
   let extraClassDeclaration = structuredOpsBaseDecls # [{
@@ -583,8 +581,6 @@ def GenericOp : GenericOpBase<"generic"> {
       Each element of the list represents and iterator of one of the following
      types:
        parallel, reduction, window
-      - sparse: an optional list with per-dimension sparsity annotations (either
-        "D" for dense or "S" for sparse) for each input and output view.
 
      Example:
      Defining a #matmul_trait attribute in MLIR can be done as follows:
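For readers unfamiliar with the deleted format, a hypothetical illustration (not taken from this commit) of the kind of trait the removed documentation and verifier described: one StrArrayAttr per shaped operand, with one "D" (dense) or "S" (sparse) entry per dimension, and sparse outputs rejected:

    #sparse_matvec_trait = {
      indexing_maps = [
        affine_map<(i, j) -> (i, j)>,  // A
        affine_map<(i, j) -> (j)>,     // b
        affine_map<(i, j) -> (i)>      // x (output)
      ],
      iterator_types = ["parallel", "reduction"],
      // Removed in this commit: per-dimension annotations, one list per
      // shaped operand, matching that operand's rank; outputs stayed dense.
      sparse = [ ["D", "S"], ["D"], ["D"] ]
    }

After this commit, the same information is stated once, on the tensor types, via #sparse_tensor.encoding.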

mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h

Lines changed: 0 additions & 15 deletions
@@ -58,9 +58,6 @@ constexpr StringRef getDocAttrName() { return "doc"; }
 /// function that implements the structured op.
 constexpr StringRef getLibraryCallAttrName() { return "library_call"; }
 
-/// Attribute name for the ArrayAttr of StrArrayAttr that encodes sparsity.
-constexpr StringRef getSparseAttrName() { return "sparse"; }
-
 /// Attribute name for the StrArrayAttr which encodes the value of strides.
 constexpr StringRef getStridesAttrName() { return "strides"; }
 
@@ -129,18 +126,6 @@ inline StringRef toString(IteratorType t) {
   llvm_unreachable("Unsupported IteratorType");
 }
 
-/// Use to encode a dense or sparse dimension.
-constexpr StringRef getSparseDimName() { return "S"; }
-inline bool isSparseDim(Attribute attr) {
-  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
-  return strAttr && strAttr.getValue() == getSparseDimName();
-}
-constexpr StringRef getDenseDimName() { return "D"; }
-inline bool isDenseDim(Attribute attr) {
-  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
-  return strAttr && strAttr.getValue() == getDenseDimName();
-}
-
 } // end namespace mlir
 
 #endif // MLIR_UTILS_STRUCTUREDOPSUTILS_H

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 4 additions & 54 deletions
@@ -447,8 +447,8 @@ void GenericOp::build(
         builder.getAffineMapArrayAttr(indexingMaps),
         builder.getStrArrayAttr(iteratorTypes),
         doc.empty() ? StringAttr() : builder.getStringAttr(doc),
-        libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-        ArrayAttr());
+        libraryCall.empty() ? StringAttr()
+                            : builder.getStringAttr(libraryCall));
   if (!bodyBuild)
     return;
 
@@ -502,8 +502,8 @@ void IndexedGenericOp::build(
         builder.getAffineMapArrayAttr(indexingMaps),
         builder.getStrArrayAttr(iteratorTypes),
         doc.empty() ? StringAttr() : builder.getStringAttr(doc),
-        libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-        ArrayAttr());
+        libraryCall.empty() ? StringAttr()
+                            : builder.getStringAttr(libraryCall));
   if (!bodyBuild)
     return;
 
@@ -676,58 +676,8 @@ void IndexedGenericOp::getEffects(
                         getInputBuffers(), getOutputBuffers());
 }
 
-namespace {
-
-template <typename GenericOpType>
-struct AnnotationsVerifier {
-  static LogicalResult verify(GenericOpType op) { return success(); }
-};
-
-template <>
-LogicalResult AnnotationsVerifier<GenericOp>::verify(GenericOp op) {
-  ArrayAttr sparseAttr = op.sparseAttr();
-  if (!sparseAttr)
-    return success();
-  // Verify consistency of sparse annotations.
-  if (!op.hasTensorSemantics())
-    return op.emitOpError("expected sparse annotations on tensors only");
-  if (op.getNumOutputs() != 1)
-    return op.emitOpError("expected single output tensor");
-  unsigned numTensors = op.getNumShapedOperands();
-  if (sparseAttr.size() != numTensors)
-    return op.emitOpError("expected one sparse annotation for each tensor");
-  for (unsigned t = 0; t < numTensors; t++) {
-    auto dimAttr = sparseAttr[t].dyn_cast_or_null<ArrayAttr>();
-    if (!dimAttr)
-      return op.emitOpError("expected sparse annotation array for tensor ")
-             << t;
-    unsigned rank = op.getShapedType(t).getRank();
-    if (dimAttr.size() != rank)
-      return op.emitOpError("expected sparse annotation with rank ")
-             << rank << " for tensor " << t;
-    // Per-dimension annotations for each tensor consist of only "D" or "S".
-    for (unsigned d = 0; d < rank; d++) {
-      if (isDenseDim(dimAttr[d])) {
-        continue;
-      } else if (isSparseDim(dimAttr[d])) {
-        if (t == numTensors - 1)
-          return op.emitOpError("sparse output tensors not supported (yet)");
-        continue;
-      }
-      return op.emitOpError("expected sparse annotation at position ")
-             << d << " for tensor " << t;
-    }
-  }
-  return success();
-}
-
-} // namespace
-
 template <typename GenericOpType>
 static LogicalResult verifyGenericOp(GenericOpType op) {
-  if (failed(AnnotationsVerifier<GenericOpType>::verify(op)))
-    return failure();
-
   return success();
 }
 
mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
       /*inputs=*/inputs,
       /*outputs=*/outputs, genericOp.indexing_maps(),
       genericOp.iterator_types(), genericOp.docAttr(),
-      genericOp.library_callAttr(), genericOp.sparseAttr());
+      genericOp.library_callAttr());
 
   // Create a new block in the region of the new Generic Op.
   Block *oldBlock = genericOp.getBody();

mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp

Lines changed: 4 additions & 8 deletions
@@ -321,8 +321,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
         consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         consumer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
   } else {
     fusedOp = rewriter.create<IndexedGenericOp>(
         consumer.getLoc(), consumer->getResultTypes(),
@@ -331,8 +330,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
         consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         consumer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
   }
 
   // Construct an AffineMap from consumer loops to producer loops.
@@ -1260,8 +1258,7 @@ struct FoldConsumerReshapeOpByLinearization
         /*outputs=*/output, rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         producer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
     auto &fusedRegion = fusedOp->getRegion(0);
     rewriter.cloneRegionBefore(producer->getRegion(0), fusedRegion,
                                fusedRegion.begin());
@@ -1352,8 +1349,7 @@ class FoldSplatConstants : public OpRewritePattern<LinalgOpTy> {
         rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         linalgOp.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
 
     // Map the block argument corresponding to the replaced argument with the
     // scalar constant.

mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py

Lines changed: 6 additions & 9 deletions
@@ -89,20 +89,18 @@ def prepare_common_structured_op(op_config: LinalgStructuredOpConfig,
       for am in AffineMap.compress_unused_symbols(op_config.indexing_maps, Context.current)])
  iterator_types_attr = ArrayAttr.get(
      [StringAttr.get(s) for s in op_config.iterator_types])
-  # TODO: Add support for sparse operands once there is a stable interface.
-  sparse_attr = None
 
  return (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types,
          type_mapping, capture_arg_mapping, indexing_maps_attr,
-          iterator_types_attr, sparse_attr)
+          iterator_types_attr)
 
 
 def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
                                *ins: Value,
                                outs: Sequence[Value] = (),
                                captures: Sequence[Value] = ()):
  all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
-     capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+     capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
      prepare_common_structured_op(op_config, *ins, outs = outs,
                                   captures=captures)
 
@@ -113,8 +111,7 @@ def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
      indexing_maps=indexing_maps_attr,
      iterator_types=iterator_types_attr,
      doc=None, # TODO: Make optional.
-      library_call=None, # TODO: Make optional.
-      sparse=sparse_attr) # TODO: Make optional.
+      library_call=None) # TODO: Make optional.
 
  # Construct the body.
  block_arg_names = _get_tensor_def_names(*in_arg_defs, *out_arg_defs)
@@ -141,7 +138,7 @@ def emit_named_structured_op(op_config: LinalgStructuredOpConfig,
                              outs: Sequence[Value] = (),
                              captures: Sequence[Value] = ()):
  all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
-     capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+     capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
      prepare_common_structured_op(op_config, *ins, outs = outs,
                                   captures = captures)
 
@@ -351,8 +348,8 @@ def _get_tensor_def_names(
 def _add_type_mapping(name: str, type: Type, type_mapping: Dict[str, Type]):
  if name in type_mapping:
    if type_mapping[name] != type:
-      raise ValueError(f"Cannot overwrite type mapping {name} = "
-          f"{type_mapping[name]} by type {type}")
+      raise ValueError(f"Cannot overwrite type mapping {name} = "
+                       f"{type_mapping[name]} by type {type}")
  type_mapping[name] = type
 
 def _is_floating_point_type(t: Type) -> bool:
