//===- ComprehensiveBufferize.cpp - Single pass bufferization -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Perform inplace bufferization within function boundaries.
// This is a specialized pass that supports inplace analysis for a fixed subset
// of ops that have well-defined inplace semantics.
// This pass caters to high-performance codegen where buffer reuse is deemed
// critical: the pass should fail if the bufferized form of the function needs
// to return any buffer.
// Generic control-flow and branching are unsupported.
// Composability with an extensible set of ops is not a first-class concern.
//
// Bufferization occurs by:
// a. performing an inPlace analysis `inPlaceAnalysisFuncOpBody`
// which marks each operation within the function with the
// `kInPlaceResultsAttrName` attribute.
// b. traversing each operation in the function and rewriting it in
// buffer form and keeping a BlockAndValueMapping mapping of the
// rewrites. New allocations are introduced during this step.
// TODO: Allocation + depending op hoisting to outermost enclosing
// sequential scope.
// c. at the end of this bufferization, 3 cases may occur:
// i. inplaceable function arguments may be reused in place after the
// function itself has been bufferized. This is encoded by IR resembling:
//
// ```
// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// func @foo(%A: tensor<?xf32> {linalg.inplaceable = true})
// -> tensor<?xf32> {
// %0 = memref.buffer_cast %A : memref<?xf32, #map>
// // ... uses of %0
// %res = memref.tensor_load %0 : memref<?xf32, #map>
// return %res : tensor<?xf32>
// }
// ```
//
// this is the cue that the bufferization of the function foo (and calls
// to it) may produce `func @foo(%A: memref<?xf32, some_layout>)`.
// To fully achieve bufferization, an additional analysis is needed to
// determine whether function argument/operand pairs bufferize to a
// single inplace buffer argument (i.e. functions may return tensors in
// arbitrary order that may not match argument numbers).
//
// ii. results that don't map to an inplaceable function argument are
// generally allocated. Since memref semantics wrt ownership of the
// underlying memory region are not well-defined, comprehensive
// bufferization chooses to perform allocations in a scoped fashion:
// returning memrefs is always considered illegal.
// Such scenarios are encoded by IR resembling:
//
// ```
// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// func @foo(%A: tensor<?xf32> {linalg.inplaceable = true})
// -> tensor<?xf32> {
// %0 = memref.buffer_cast %A : memref<?xf32, #map>
// %1 = memref.dim %0, %c0 : memref<?xf32, #map>
// %2 = memref.alloc(%1) : memref<?xf32>
// %3 = memref.cast %2 : memref<?xf32> to memref<?xf32, #map>
// // ... uses of %3
// memref.dealloc %2 : memref<?xf32>
// %res = memref.tensor_load %3 : memref<?xf32, #map>
// return %res : tensor<?xf32>
// }
// ```
//
// this is the cue that the bufferization of the function foo (and calls
// to it) must produce
// `func @foo(%A: memref<?xf32, some_layout>, %B: memref<?xf32, some_layout>)`
// (i.e. make a cloned allocation of the result tensor).
// To fully achieve bufferization, the alloc/dealloc pair must be lifted
// out of the function at each call site.
//
// iii. as an optimization over ii., it may be possible to reuse an argument
// and return only a slice of it.
// This may forego allocation by letting *all* callers decide whether to
// pass a new *aliasing* memref function argument (i.e. a subview).
// Without loss of generality, callers may agree to allocate a new buffer
// to avoid this aliasing. Such scenarios are encoded by IR resembling:
//
// ```
// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// func @foo(%arg0: tensor<?xf32> {linalg.inplaceable = true})
// -> tensor<4xf32> {
// %0 = memref.buffer_cast %arg0 : memref<?xf32, #map>
// %1 = memref.subview %0[0] [4] [1]
//     : memref<?xf32, #map> to memref<4xf32, #map>
// // ... inplace computes into %1
// %3 = memref.tensor_load %1 : memref<4xf32, #map>
// return %3 : tensor<4xf32>
// }
// ```
//
// Note: In the future, it may be worthwhile to design special bufferization
// ops to encode the desired semantics at function boundaries for i., ii. and
// iii.
//
// Lastly, note that the layout map chosen for bufferization is the most
// dynamic canonical strided layout of the proper rank. This ensures
// compatibility with expected layouts after transformations. Combinations of
// memref.cast + canonicalization are responsible for cleanups.
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.h"
#include <random>
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/BufferUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"
#define DEBUG_TYPE "comprehensive-module-bufferize"
using namespace mlir;
using namespace linalg;
using namespace tensor;
using namespace comprehensive_bufferize;
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << X)
// Forward declarations.
#ifndef NDEBUG
static std::string printOperationInfo(Operation *, bool prefix = true);
static std::string printValueInfo(Value, bool prefix = true);
#endif
//===----------------------------------------------------------------------===//
// Generic helpers.
//===----------------------------------------------------------------------===//
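/// Return true if `t` is a tensor type.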
static bool isaTensor(Type t) { return t.isa<TensorType>(); }
/// Return the FuncOp called by `callOp`.
static FuncOp getCalledFunction(CallOpInterface callOp) {
SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
if (!sym)
return nullptr;
return dyn_cast_or_null<FuncOp>(
SymbolTable::lookupNearestSymbolFrom(callOp, sym));
}
/// Return the unique ReturnOp that terminates `funcOp`.
/// Return nullptr if there is no such unique ReturnOp.
static ReturnOp getAssumedUniqueReturnOp(FuncOp funcOp) {
ReturnOp returnOp;
for (Block &b : funcOp.body()) {
if (auto candidateOp = dyn_cast<ReturnOp>(b.getTerminator())) {
if (returnOp)
return nullptr;
returnOp = candidateOp;
}
}
return returnOp;
}
//===----------------------------------------------------------------------===//
// Bufferization-specific attribute manipulation.
// These are for testing and debugging only. Bufferization information is
// stored in BufferizationAliasInfo. When run with `testAnalysisOnly`, the IR
// is annotated with the results of the analysis (copied from
// BufferizationAliasInfo), so that they can be checked in tests.
//===----------------------------------------------------------------------===//
/// Attribute marker to specify op results that can be bufferized inPlace.
constexpr StringLiteral kInPlaceResultsAttrName = "__inplace_results_attr__";
/// Mark whether OpResult can actually be bufferized inplace.
/// If `inPlace` is `true`, the use-def chain analysis has guaranteed that no
/// subsequent write would occur to the bufferized tensor value (i.e. the result
/// can be bufferized inplace).
static void setInPlaceOpResult(OpResult opResult, bool inPlace) {
if (!opResult)
return;
Operation *op = opResult.getOwner();
auto attr =
op->getAttr(kInPlaceResultsAttrName).dyn_cast_or_null<ArrayAttr>();
SmallVector<StringRef> inPlaceVector =
attr ? SmallVector<StringRef>(
llvm::to_vector<4>(attr.getAsValueRange<StringAttr>()))
: SmallVector<StringRef>(op->getNumResults(), "false");
LDBG("->set inPlace=" << inPlace << " <- #" << opResult.getResultNumber()
<< ": " << printOperationInfo(op) << "\n");
inPlaceVector[opResult.getResultNumber()] = inPlace ? "true" : "false";
op->setAttr(kInPlaceResultsAttrName,
OpBuilder(op).getStrArrayAttr(inPlaceVector));
}
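// For illustration, a sketch of annotated IR after the analysis (the ops
// shown here are hypothetical; only the attribute form matters):
//
//   %r = tensor.insert_slice %0 into %t[0] [4] [1]
//       {__inplace_results_attr__ = ["true"]}
//       : tensor<4xf32> into tensor<?xf32>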
/// Set the attribute that triggers inplace bufferization on a FuncOp argument
/// `bbArg`.
static void setInPlaceFuncArgument(BlockArgument bbArg, bool inPlace) {
auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
funcOp.setArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kInplaceableAttrName,
BoolAttr::get(bbArg.getContext(), inPlace));
}
/// Remove the attribute that triggers inplace bufferization on a FuncOp
/// argument `bbArg`.
static void removeBufferizationFuncArguments(BlockArgument bbArg) {
auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
funcOp.removeArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kBufferLayoutAttrName);
funcOp.removeArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kInplaceableAttrName);
}
//===----------------------------------------------------------------------===//
// Printing helpers.
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
/// Helper method printing the bufferization information of a buffer / tensor.
static void printTensorOrBufferInfo(std::string prefix, Value value,
AsmState &state, llvm::raw_ostream &os) {
if (!value.getType().isa<ShapedType>())
return;
os << prefix;
value.printAsOperand(os, state);
os << " : " << value.getType();
}
/// Print the operation name and bufferization information.
static std::string printOperationInfo(Operation *op, bool prefix) {
std::string result;
llvm::raw_string_ostream os(result);
AsmState state(op->getParentOfType<mlir::FuncOp>());
StringRef tab = prefix ? "\n[" DEBUG_TYPE "]\t" : "";
os << tab << op->getName();
SmallVector<Value> shapedOperands;
for (OpOperand &opOperand : op->getOpOperands()) {
std::string prefix =
llvm::formatv("{0} -> #{1} ", tab, opOperand.getOperandNumber());
printTensorOrBufferInfo(prefix, opOperand.get(), state, os);
}
for (OpResult opResult : op->getOpResults()) {
std::string prefix =
llvm::formatv("{0} <- #{1} ", tab, opResult.getResultNumber());
printTensorOrBufferInfo(prefix, opResult, state, os);
}
return result;
}
/// Print the bufferization information for the defining op or block argument.
static std::string printValueInfo(Value value, bool prefix) {
auto *op = value.getDefiningOp();
if (op)
return printOperationInfo(op, prefix);
// Print the block argument bufferization information.
std::string result;
llvm::raw_string_ostream os(result);
AsmState state(value.getParentRegion()->getParentOfType<mlir::FuncOp>());
os << value;
printTensorOrBufferInfo("\n\t - ", value, state, os);
return result;
}
#endif
//===----------------------------------------------------------------------===//
// Bufferization-specific alias analysis.
//===----------------------------------------------------------------------===//
/// Return true if opOperand has been decided to bufferize in-place.
static bool isInplaceMemoryWrite(OpOperand &opOperand,
const BufferizationAliasInfo &aliasInfo) {
// Ops that do not bufferize to a memory write cannot be inplace memory writes.
if (!bufferizesToMemoryWrite(opOperand))
return false;
OpResult opResult = getAliasingOpResult(opOperand);
return opResult && aliasInfo.isInPlace(opResult);
}
/// Return true if, under current bufferization decisions, the buffer of `value`
/// is not writable.
static bool aliasesNonWritableBuffer(Value value,
const BufferizationAliasInfo &aliasInfo) {
LDBG("WRITABILITY ANALYSIS FOR " << printValueInfo(value) << "\n");
bool foundNonWritableBuffer = false;
aliasInfo.applyOnAliases(value, [&](Value v) {
// Some values are known to be writable.
if (aliasInfo.bufferizesToWritableMemory(v))
return;
// Query BufferizableOpInterface to see if the OpResult is writable.
// TODO: Out-of-place bufferized OpResult could be considered writable.
if (auto bufferizableOp = v.getDefiningOp<BufferizableOpInterface>())
if (bufferizableOp && bufferizableOp.isWritable(v))
return;
// Query BufferizableOpInterface to see if the BlockArgument is writable.
if (auto bbArg = v.dyn_cast<BlockArgument>())
if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(
bbArg.getOwner()->getParentOp()))
if (bufferizableOp.isWritable(bbArg))
return;
foundNonWritableBuffer = true;
});
if (foundNonWritableBuffer)
LDBG("--> NON WRITABLE\n");
else
LDBG("--> WRITABLE\n");
return foundNonWritableBuffer;
}
/// Return true if `value` aliases a buffer that is written to by an inplace
/// memory write under the current bufferization decisions.
static bool aliasesInPlaceWrite(Value value,
const BufferizationAliasInfo &aliasInfo) {
LDBG("----Start aliasesInPlaceWrite\n");
LDBG("-------for : " << printValueInfo(value) << '\n');
bool foundInplaceWrite = false;
aliasInfo.applyOnAliases(value, [&](Value v) {
for (auto &use : v.getUses()) {
if (isInplaceMemoryWrite(use, aliasInfo)) {
LDBG("-----------wants to bufferize to inPlace write: "
<< printOperationInfo(use.getOwner()) << '\n');
foundInplaceWrite = true;
return;
}
}
});
if (!foundInplaceWrite)
LDBG("----------->does not alias an inplace write\n");
return foundInplaceWrite;
}
/// Return true if `a` happens before `b`, i.e., `a` or one of its ancestors
/// properly dominates `b` and `b` is not inside `a`.
static bool happensBefore(Operation *a, Operation *b,
const DominanceInfo &domInfo) {
do {
// TODO: Instead of isProperAncestor + properlyDominates, we should use
// properlyDominatesImpl(a, b, /*enclosingOpOk=*/false)
if (a->isProperAncestor(b))
return false;
if (domInfo.properlyDominates(a, b))
return true;
} while ((a = a->getParentOp()));
return false;
}
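// For illustration, a sketch with hypothetical ops "a" and "b":
//
//   %0 = "a"() : () -> tensor<?xf32>
//   scf.for %i = %c0 to %c10 step %c1 {
//     %1 = "b"(%0) : (tensor<?xf32>) -> tensor<?xf32>
//   }
//
// happensBefore(a, b) is true: `a` properly dominates the loop enclosing
// `b`. happensBefore(for, b) is false: `b` is nested inside the scf.for and
// an ancestor never "happens before" its descendants.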
/// Given sets of uses and writes, return true if there is a RaW conflict under
/// the assumption that all given reads/writes alias the same buffer and that
/// all given writes bufferize inplace.
///
/// A conflict is: According to SSA use-def chains, a read R is supposed to read
/// the result of a write W1. But because of bufferization decisions, R actually
/// reads another write W2.
static bool
hasReadAfterWriteInterference(const DenseSet<OpOperand *> &usesRead,
const DenseSet<OpOperand *> &usesWrite,
const DominanceInfo &domInfo,
const BufferizationAliasInfo &aliasInfo) {
for (OpOperand *uRead : usesRead) {
Operation *readingOp = uRead->getOwner();
// Find most recent write of uRead by following the SSA use-def chain. E.g.:
//
// %0 = "writing_op"(%t) : tensor<?x32> -> tensor<?xf32>
// %1 = "aliasing_op"(%0) : tensor<?x32> -> tensor<?xf32>
// %2 = "reading_op"(%1) : : tensor<?x32> -> not_a_tensor_type
//
// In the above example, if uRead is the OpOperand of reading_op, lastWrite
// is %0. Note that operations that create an alias but do not write (such
// as ExtractSliceOp) are skipped.
Value lastWrite = findLastPrecedingWrite(uRead->get());
// Look for conflicting memory writes. Potential conflicts are writes to an
// alias that have been decided to bufferize inplace.
for (OpOperand *uConflictingWrite : usesWrite) {
// Throughout this loop, check for multiple requirements that have to be
// met for uConflictingWrite to be an actual conflict.
Operation *conflictingWritingOp = uConflictingWrite->getOwner();
// Print some debug info.
LDBG("Found potential conflict:\n");
LDBG("READ = #" << uRead->getOperandNumber() << " of "
<< printOperationInfo(readingOp) << "\n");
LDBG("CONFLICTING WRITE = #"
<< uConflictingWrite->getOperandNumber() << " of "
<< printOperationInfo(conflictingWritingOp) << "\n");
// No conflict if the readingOp dominates conflictingWritingOp, i.e., the
// write is not visible when reading.
if (happensBefore(readingOp, conflictingWritingOp, domInfo))
continue;
// No conflict if the reading use equals the use of the conflicting write.
// A use cannot conflict with itself. Note: Just being the same op is not
// enough. It has to be the same use.
if (uConflictingWrite == uRead)
continue;
// No conflict if the op interface says so.
if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(readingOp))
if (bufferizableOp.isNotConflicting(uRead, uConflictingWrite,
aliasInfo))
continue;
if (conflictingWritingOp != readingOp)
if (auto bufferizableOp =
dyn_cast<BufferizableOpInterface>(conflictingWritingOp))
if (bufferizableOp.isNotConflicting(uRead, uConflictingWrite,
aliasInfo))
continue;
// Special rules for branches.
// TODO: Use an interface.
if (scf::insideMutuallyExclusiveBranches(readingOp, conflictingWritingOp))
continue;
LDBG("WRITE = #" << printValueInfo(lastWrite) << "\n");
// No conflict if the conflicting write happens before the last
// write.
if (Operation *writingOp = lastWrite.getDefiningOp()) {
if (happensBefore(conflictingWritingOp, writingOp, domInfo))
// conflictingWritingOp happens before writingOp. No conflict.
continue;
// No conflict if conflictingWritingOp is contained in writingOp.
if (writingOp->isProperAncestor(conflictingWritingOp))
continue;
} else {
auto bbArg = lastWrite.cast<BlockArgument>();
Block *block = bbArg.getOwner();
if (!block->findAncestorOpInBlock(*conflictingWritingOp))
// conflictingWritingOp happens outside of the block. No
// conflict.
continue;
}
// No conflict if the conflicting write and the last write are the same
// use.
if (getAliasingOpResult(*uConflictingWrite) == lastWrite)
continue;
// All requirements are met. Conflict found!
LDBG("CONFLICT CONFIRMED!\n\n");
return true;
}
}
LDBG("NOT A CONFLICT!\n\n");
return false;
}
/// Return true if bufferizing result inplace would create a conflict. A read R
/// and a write W of the same alias set is a conflict if inplace bufferization
/// of W changes the value read by R to a value different from the one that
/// would be expected by tracing back R's origin through SSA use-def chains.
/// A conflict can only be introduced by a new alias and/or an inplace
/// bufferization decision.
///
/// Example:
/// %0 = tensor.extract_slice %t[...][...][1, 1] {inplace?}
/// %1 = vector.transfer_write %v1, %t {inplace} : vector<5xf32>, tensor<?xf32>
/// %e = tensor.extract_slice %1
/// %2 = vector.transfer_write %v2, %0 {inplace} : vector<6xf32>, tensor<?xf32>
/// %3 = vector.transfer_read %e, %cst : tensor<?xf32>, vector<7xf32>
///
/// In the above example, the two TransferWriteOps have already been decided to
/// bufferize inplace. Bufferizing the ExtractSliceOp inplace would create a
/// conflict because:
/// * According to SSA use-def chains, we expect to read the result of %1.
/// * However, adding an alias {%0, %t} would mean that the second
/// TransferWriteOp overwrites the first one. Therefore, the TransferReadOp
/// would no longer be reading the result of %1.
///
/// If `checkConsistencyOnly` is true, this function checks if there is a
/// read-after-write conflict without bufferizing `operand` inplace. This would
/// indicate a problem with the current inplace bufferization decisions.
bool wouldCreateReadAfterWriteInterference(
OpOperand &operand, OpResult result, const DominanceInfo &domInfo,
const BufferizationAliasInfo &aliasInfo,
bool checkConsistencyOnly = false) {
#ifndef NDEBUG
SmallVector<OpOperand *> opOperands = getAliasingOpOperand(result);
assert(llvm::find(opOperands, &operand) != opOperands.end() &&
"operand and result do not match");
#endif // NDEBUG
// Helper function to iterate on aliases of `root` and capture the reads.
auto getAliasingReads = [&](DenseSet<OpOperand *> &res, Value root) {
aliasInfo.applyOnAliases(root, [&](Value alias) {
for (auto &use : alias.getUses())
// Read to a value that aliases root.
if (bufferizesToMemoryRead(use))
res.insert(&use);
});
};
// Helper function to iterate on aliases of `root` and capture the writes.
auto getAliasingInplaceWrites = [&](DenseSet<OpOperand *> &res, Value root) {
aliasInfo.applyOnAliases(root, [&](Value alias) {
for (auto &use : alias.getUses())
// Inplace write to a value that aliases root.
if (isInplaceMemoryWrite(use, aliasInfo))
res.insert(&use);
});
};
// Collect reads and writes of all aliases of OpOperand and OpResult.
DenseSet<OpOperand *> usesRead, usesWrite;
getAliasingReads(usesRead, operand.get());
getAliasingReads(usesRead, result);
getAliasingInplaceWrites(usesWrite, operand.get());
getAliasingInplaceWrites(usesWrite, result);
if (!checkConsistencyOnly && bufferizesToMemoryWrite(operand))
usesWrite.insert(&operand);
return hasReadAfterWriteInterference(usesRead, usesWrite, domInfo, aliasInfo);
}
/// Return true if bufferizing `opOperand` inplace with `opResult` would create
/// a write to a non-writable buffer.
static bool
wouldCreateWriteToNonWritableBuffer(OpOperand &opOperand, OpResult opResult,
const BufferizationAliasInfo &aliasInfo) {
#ifndef NDEBUG
SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
assert(llvm::find(opOperands, &opOperand) != opOperands.end() &&
"operand and result do not match");
#endif // NDEBUG
// Certain buffers are not writable:
// 1. A function bbArg that is not inplaceable or
// 2. A constant op.
assert(!aliasesNonWritableBuffer(opResult, aliasInfo) &&
"expected that opResult does not alias non-writable buffer");
bool nonWritable = aliasesNonWritableBuffer(opOperand.get(), aliasInfo);
if (!nonWritable)
return false;
// This is a problem only if the buffer is written to via some alias.
bool hasWrite = aliasesInPlaceWrite(opResult, aliasInfo) ||
aliasesInPlaceWrite(opOperand.get(), aliasInfo) ||
bufferizesToMemoryWrite(opOperand);
if (!hasWrite)
return false;
LDBG("->the corresponding buffer is not writeable\n");
return true;
}
//===----------------------------------------------------------------------===//
// Forward declarations.
//===----------------------------------------------------------------------===//
/// Return the op with Allocate MemoryEffect if `value` is equivalent to such
/// an op. Return null otherwise.
static Operation *getEquivalentAlloc(Value value,
const BufferizationAliasInfo &aliasInfo);
/// Return the first argument of the enclosing FuncOp that is equivalent to `v`.
/// Return null if no such bbArg can be found.
static BlockArgument
getEquivalentEnclosingFuncBBArg(Value v,
const BufferizationAliasInfo &aliasInfo);
//===----------------------------------------------------------------------===//
// Bufferization-specific MemRefType support.
//===----------------------------------------------------------------------===//
/// Return a contiguous MemRefType (i.e. with canonical/empty layout map)
/// with the same shape as `shapedType` and the specified `layout` and
/// `memorySpace`.
static MemRefType getContiguousMemRefType(ShapedType shapedType,
MemRefLayoutAttrInterface layout = {},
Attribute memorySpace = {}) {
return MemRefType::get(shapedType.getShape(), shapedType.getElementType(),
layout, memorySpace);
}
/// Return a contiguous MemRefType (i.e. with canonical/empty layout map)
/// with the same shape as `type` and the specified `layout` and
/// `memorySpace`, or an UnrankedMemRefType if `type` is unranked.
static Type
getContiguousOrUnrankedMemRefType(Type type,
MemRefLayoutAttrInterface layout = {},
Attribute memorySpace = {}) {
if (type.isa<RankedTensorType, MemRefType>())
return getContiguousMemRefType(type.cast<ShapedType>(), layout,
memorySpace);
assert(!layout && "expected empty layout with UnrankedMemRefType");
return UnrankedMemRefType::get(getElementTypeOrSelf(type), memorySpace);
}
/// Return a MemRefType to which the `tensorType` can be bufferized in a
/// composable fashion. The layout must be the most dynamic possible and is
/// expected to canonicalize away once bufferization is finished.
static MemRefType getDynamicMemRefType(RankedTensorType tensorType,
unsigned addressSpace = 0) {
// TODO: address space decisions to connect with the actual alloc.
int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
SmallVector<int64_t> dynamicStrides(tensorType.getRank(),
ShapedType::kDynamicStrideOrOffset);
AffineMap stridedLayout = makeStridedLinearLayoutMap(
dynamicStrides, dynamicOffset, tensorType.getContext());
return MemRefType::get(tensorType.getShape(), tensorType.getElementType(),
stridedLayout, addressSpace);
}
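// For illustration: a rank-1 tensor<?xf32> bufferizes to the fully dynamic
// strided type memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>,
// i.e. the #map used in the examples at the top of this file.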
/// Return the FunctionType with `argumentTypes` and `resultTypes` where each
/// tensor is replaced by the corresponding buffer type.
/// In order for all the callers to agree, this *must* bufferize to the most
/// dynamic buffer type supported.
/// A later pass across all CallOps in the module can decide whether to
/// simplify the types according to some cost model.
static FunctionType getBufferizedFunctionType(MLIRContext *ctx,
TypeRange argumentTypes,
TypeRange resultTypes) {
auto rewrite = [](Type t) -> Type {
// TODO: non-zero address space.
// TODO: layout information if relevant.
if (auto rankedTensorType = t.dyn_cast<RankedTensorType>())
return getDynamicMemRefType(rankedTensorType);
if (auto tensorType = t.dyn_cast<TensorType>())
return getContiguousOrUnrankedMemRefType(tensorType);
return t;
};
auto argTypes = llvm::to_vector<4>(llvm::map_range(argumentTypes, rewrite));
auto retTypes = llvm::to_vector<4>(llvm::map_range(resultTypes, rewrite));
return FunctionType::get(ctx, argTypes, retTypes);
}
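// For illustration, a sketch of the rewrite (with #map the fully dynamic
// strided layout shown above):
//
//   (tensor<?xf32>, f32) -> (tensor<4xf32>)
// becomes
//   (memref<?xf32, #map>, f32) -> (memref<4xf32, #map>)
//
// Non-tensor types such as f32 are left unchanged.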
/// If an entry for `funcOp` is available in `bufferizedFunctionTypes`, return
/// it. Otherwise, construct a new entry based on `argumentTypes` and
/// `resultTypes`.
// TODO: improve the layering.
static FunctionType getOrCreateBufferizedFunctionType(
FuncOp funcOp, TypeRange argumentTypes, TypeRange resultTypes,
DenseMap<FuncOp, FunctionType> &bufferizedFunctionTypes) {
auto it = bufferizedFunctionTypes.find(funcOp);
if (it != bufferizedFunctionTypes.end())
return it->second;
auto it2 = bufferizedFunctionTypes.try_emplace(
funcOp, getBufferizedFunctionType(funcOp.getContext(), argumentTypes,
resultTypes));
LDBG("FT: " << funcOp.getType() << " -> " << it2.first->second << "\n");
return it2.first->second;
}
//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc/dealloc insertion support.
//===----------------------------------------------------------------------===//
/// Move the insertion point of the given builder to the beginning of a
/// surrounding block as much as possible, while not crossing any allocation
/// hoisting barriers.
static void moveInsertionPointToAllocationHoistingBarrier(OpBuilder &b) {
Operation *op = b.getInsertionBlock()->getParentOp();
while (op) {
if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
if (bufferizableOp.isAllocationHoistingBarrier())
break;
op = op->getParentOp();
}
// FuncOp is an allocation hoisting barrier, so the above loop should never
// run out of parents.
assert(
(op && cast<BufferizableOpInterface>(op).isAllocationHoistingBarrier()) &&
"expected traversal to end at allocation hoisting barrier");
// TODO: Handle cases where allocation hoisting barrier has more than one
// region or block.
assert(op->getNumRegions() == 1 &&
"allocation hoisting barriers with >1 regions not supported");
assert(op->getRegion(0).getBlocks().size() == 1 &&
"allocation hoisting barriers with >1 blocks not supported");
b.setInsertionPointToStart(&(op->getRegion(0).front()));
}
/// Compute the type of the `memref` to use for allocating the buffer for
/// `shapedValue`. Also returns (by reference in `dynShape`), the value for the
/// dynamic dimensions in the returned `memref` type. The function may also set
/// the insertion point to an earlier location, where the allocation should
/// happen ("allocation hoisting").
static MemRefType getAllocationTypeAndShape(OpBuilder &b, Location loc,
Value shapedValue,
SmallVectorImpl<Value> &dynShape) {
MemRefType allocMemRefType =
getContiguousMemRefType(shapedValue.getType().cast<ShapedType>());
// Compute the dynamic part of the shape.
bool reifiedShapes = false;
if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
shapedValue.getDefiningOp())) {
ReifiedRankedShapedTypeDims resultDims;
if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
reifiedShapes = true;
OpResult resultValue = shapedValue.dyn_cast<OpResult>();
auto &shape = resultDims[resultValue.getResultNumber()];
for (auto dim : enumerate(allocMemRefType.getShape()))
if (ShapedType::isDynamic(dim.value()))
dynShape.push_back(shape[dim.index()]);
}
}
if (!reifiedShapes) {
for (auto dim : enumerate(allocMemRefType.getShape()))
if (ShapedType::isDynamic(dim.value())) {
assert((shapedValue.getType().isa<UnrankedMemRefType>() ||
shapedValue.getType().isa<MemRefType>()) &&
"expected MemRef type");
dynShape.push_back(
b.create<memref::DimOp>(loc, shapedValue, dim.index()));
}
}
// If the buffer is statically shaped, try to hoist it to the first enclosing
// parallel region.
// TODO: also hoist in the dynamic case. For now this relies on subsequent
// calls to LICM and buffer hoisting which will most likely not succeed.
// TODO: when packing, allocate a static bounding box which will enable more
// hoisting.
if (dynShape.empty())
moveInsertionPointToAllocationHoistingBarrier(b);
return allocMemRefType;
}
/// Create an AllocOp/DeallocOp pair, where the AllocOp is after
/// `shapedValue.getDefiningOp` (or at the top of the block in case of a
/// bbArg) and the DeallocOp is at the end of the block.
static Value createNewAllocDeallocPairForShapedValue(
OpBuilder &b, Location loc, Value shapedValue, BufferizationState &state) {
// Take a guard before anything else.
OpBuilder::InsertionGuard g(b);
// 1. Create memory allocation.
assert(shapedValue.getType().isa<ShapedType>());
MemRefType memRefType = shapedValue.getType().dyn_cast<MemRefType>();
SmallVector<Value> dynShape;
// Note: getAllocationTypeAndShape also sets the insertion point.
MemRefType allocMemRefType =
getAllocationTypeAndShape(b, loc, shapedValue, dynShape);
Optional<Value> allocated =
state.allocationFns.allocationFn(b, loc, allocMemRefType, dynShape);
// TODO: For now just assert the value is returned. Eventually need to
// error-propagate.
assert(allocated && "allocation failed");
Value casted = allocated.getValue();
if (memRefType && memRefType != allocMemRefType) {
casted = b.create<memref::CastOp>(loc, memRefType, allocated.getValue());
state.aliasInfo.insertNewBufferEquivalence(casted, allocated.getValue());
}
// 2. Create memory deallocation.
b.setInsertionPoint(allocated.getValue().getParentBlock()->getTerminator());
state.allocationFns.deallocationFn(b, loc, allocated.getValue());
return casted;
}
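// For illustration, a sketch of the emitted IR, assuming the default
// allocation/deallocation functions and a dynamically sized source %src of
// type memref<?xf32, #map>:
//
//   %d = memref.dim %src, %c0 : memref<?xf32, #map>
//   %a = memref.alloc(%d) : memref<?xf32>
//   %c = memref.cast %a : memref<?xf32> to memref<?xf32, #map>
//   ...
//   memref.dealloc %a : memref<?xf32>
//
// The dealloc is placed just before the terminator of the allocating block.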
//===----------------------------------------------------------------------===//
// Bufferization as simple BlockAndValueMapping rewrites.
//===----------------------------------------------------------------------===//
/// As a first approximation, all the function arguments of a FuncOp are
/// marked inplaceable. For now, it is the responsibility of the `callOp`
/// bufferization to allow FuncOps that are inplaceable to write in-place.
static LogicalResult
bufferize(OpBuilder &b, CallOpInterface callOp, BufferizationState &state,
DenseMap<FuncOp, FunctionType> &bufferizedFunctionTypes) {
FuncOp funcOp = getCalledFunction(callOp);
assert(isa<CallOp>(callOp.getOperation()) && funcOp &&
"expected CallOp to a FuncOp");
// If nothing to do then we are done.
if (!llvm::any_of(funcOp.getType().getInputs(), isaTensor) &&
!llvm::any_of(funcOp.getType().getResults(), isaTensor))
return success();
// Take a guard before anything else.
OpBuilder::InsertionGuard g(b);
b.setInsertionPoint(callOp);
// 1. Filter return types:
// - if the callee is bodiless / external, we cannot inspect it and we
// cannot assume anything. We can just assert that it does not return a
// tensor as this would have to bufferize to "return a memref", whose
// semantics is ill-defined.
// - if the callee has a body, we perform inter-procedural equivalence
// analysis. When successful, a result folds onto an operand. When
// unsuccessful, additional work is needed to either:
// * hoist a result into an inplaceable operand or
// * devise a better representation to truly return a buffer.
SmallVector<Type> resultTypes;
SmallVector<Value> hoistedArguments;
if (funcOp.body().empty()) {
if (llvm::any_of(funcOp.getType().getResults(), isaTensor))
return callOp->emitError()
<< "cannot bufferize bodiless function that returns a tensor";
} else {
ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
assert(returnOp && "expected func with single return op");
// For each FuncOp result, keep track of which inplace argument it reuses.
for (OpOperand &returnOperand : returnOp->getOpOperands()) {
Type returnType = returnOperand.get().getType();
if (!isaTensor(returnType)) {
resultTypes.push_back(returnType);
continue;
}
// If return operand is equivalent to some bbArg, no need to return it.
Value returnVal = returnOperand.get();
if (BlockArgument bbArg =
getEquivalentEnclosingFuncBBArg(returnVal, state.aliasInfo)) {
Value oldRes = callOp->getResult(returnOperand.getOperandNumber());
int64_t idx = bbArg.getArgNumber();
Value buffer = state.lookupBuffer(callOp->getOperand(idx));
// Add CallOp operand/result equivalence: this is interprocedural info.
state.aliasInfo.insertNewBufferEquivalence(oldRes, buffer);
state.mapBuffer(oldRes, buffer);
// Add a TensorLoadOp to kill all uses of the CallOp return.
// Replace all uses of the CallOp results so we can erase the CallOp.
// This TensorLoadOp must fold/DCE away or bufferization should be
// considered failed.
Value tensorLoad =
b.create<memref::TensorLoadOp>(callOp.getLoc(), buffer);
oldRes.replaceAllUsesWith(tensorLoad);
// Add new op equivalence info.
state.aliasInfo.insertNewBufferEquivalence(tensorLoad, buffer);
state.mapBuffer(tensorLoad, buffer);
continue;
}
// TODO: Need to hoist above function boundary.
if (Operation *allocOp = getEquivalentAlloc(returnVal, state.aliasInfo)) {
hoistedArguments.push_back(allocOp->getResult(0));
continue;
}
// Other cases legitimately need to return a tensor; this is currently not
// supported. For instance, if hoisting across the function boundary has
// failed, it may be due to e.g. data-dependent sizes. In such a case, we
// would need a better type than memref.
resultTypes.push_back(returnType);
int64_t returnIdx = returnOperand.getOperandNumber();
return returnOp->emitError()
<< "buffer result #" << returnIdx << " not produced by an alloc\n";
}
}
// 2. Compute bufferized FunctionType.
SmallVector<Type> argumentTypes{callOp->getOperandTypes()};
ValueRange hoistedArgs{hoistedArguments};
llvm::append_range(argumentTypes, hoistedArgs.getTypes());
// Get the bufferized FunctionType for funcOp or construct it if not yet
// available.
FunctionType bufferizedFuncType = getOrCreateBufferizedFunctionType(
funcOp, argumentTypes, resultTypes, bufferizedFunctionTypes);
// 3. Rewrite tensor operands as memrefs based on `bufferizedFuncType`.
SmallVector<Value> newOperands;
newOperands.reserve(callOp->getNumOperands());
for (OpOperand &opOperand : callOp->getOpOperands()) {
Value tensorOperand = opOperand.get();
// Non-tensor operands are just copied.
if (!tensorOperand.getType().isa<TensorType>()) {
newOperands.push_back(tensorOperand);
continue;
}
// Tensor operands are guaranteed to have been bufferized.
int64_t idx = opOperand.getOperandNumber();
Value buffer = state.lookupBuffer(tensorOperand);
// Caller / callee type mismatch is handled with a CastOp.
auto memRefType = bufferizedFuncType.getInput(idx);
// Since we don't yet have a clear layout story, buffer_cast may
// conservatively turn tensors into more dynamic memrefs than necessary.
// If the memref type expected by the callee differs, introduce an extra
// memref.cast that will either canonicalize away or fail compilation until
// we can do something better.
if (buffer.getType() != memRefType) {
Value castBuffer =
b.create<memref::CastOp>(callOp.getLoc(), memRefType, buffer);
// Add new op equivalence info.
state.aliasInfo.insertNewBufferEquivalence(castBuffer, buffer);
state.mapBuffer(tensorOperand, castBuffer);
buffer = castBuffer;
}
newOperands.push_back(buffer);
}
// 4. Create the new CallOp.
Operation *newCallOp = b.create<CallOp>(callOp.getLoc(), funcOp.sym_name(),
resultTypes, newOperands);
newCallOp->setAttrs(callOp->getAttrs());
// Delete the op at the end of bufferization.
return success();
}
/// FuncOp bufferization always creates memref.buffer_cast ops for its tensor
/// arguments.
static LogicalResult bufferize(OpBuilder &b, FuncOp funcOp,
BufferizationState &state) {
// Take a guard before anything else.
OpBuilder::InsertionGuard g(b);
b.setInsertionPointToStart(&funcOp.body().front());
for (auto bbArg : funcOp.getArguments()) {
auto tensorType = bbArg.getType().dyn_cast<TensorType>();
if (!tensorType)
continue;
auto rankedTensorType = tensorType.dyn_cast<RankedTensorType>();
// Cast the tensor to the most dynamic buffer possible. Further
// canonicalizations will clean up.
Type memRefType = rankedTensorType
? getDynamicMemRefType(rankedTensorType)
: getContiguousOrUnrankedMemRefType(tensorType);
Value bufferCast =
b.create<memref::BufferCastOp>(funcOp.getLoc(), memRefType, bbArg);
state.aliasInfo.insertNewBufferEquivalence(bufferCast, bbArg);
state.mapBuffer(bbArg, bufferCast);
}
return success();
}
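// For illustration, a sketch of the boundary rewrite for
//   func @foo(%A: tensor<?xf32>) { ... }
// which prepends
//   %0 = memref.buffer_cast %A : memref<?xf32, #map>
// to the entry block and maps %A to %0 for all subsequent rewrites.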
//===----------------------------------------------------------------------===//
// Bufferization analyses.
//===----------------------------------------------------------------------===//
/// Determine if `operand` can be bufferized in-place with `result`.
static LogicalResult
bufferizableInPlaceAnalysisImpl(OpOperand &operand, OpResult result,
BufferizationAliasInfo &aliasInfo,
const DominanceInfo &domInfo) {
#ifndef NDEBUG
SmallVector<OpOperand *> opOperands = getAliasingOpOperand(result);
assert(llvm::find(opOperands, &operand) != opOperands.end() &&
"operand and result do not match");
#endif // NDEBUG
int64_t resultNumber = result.getResultNumber();
(void)resultNumber;
LDBG('\n');
LDBG("Inplace analysis for <- #" << resultNumber << " -> #"
<< operand.getOperandNumber() << " in "
<< printValueInfo(result) << '\n');
bool foundInterference =
wouldCreateWriteToNonWritableBuffer(operand, result, aliasInfo) ||
wouldCreateReadAfterWriteInterference(operand, result, domInfo,
aliasInfo);
if (foundInterference)
aliasInfo.bufferizeOutOfPlace(result);
else
aliasInfo.bufferizeInPlace(result, operand);
LDBG("Done inplace analysis for result #" << resultNumber << '\n');
return success();
}
/// Determine if `operand` can be bufferized in-place with one of the op's
/// results.
///
/// Even if an op does not read or write, it may still create an alias when
/// bufferized in-place. An example of such ops is tensor.extract_slice.
///
/// Rationale for bufferizing `%1 = tensor.extract_slice %0[...]` inplace:
///
/// When bufferized out of place, an ExtractSliceOp lowers to alloc + copy. This
/// cannot change the flow of information for either the source or the
/// result buffers.
///
/// When bufferized inplace, an ExtractSliceOp does not by itself create any
/// read or write from memory. Instead, it has the effect of merging the alias
/// sets of the source and the result buffers.
///