MLIR  16.0.0git
BufferizableOpInterface.cpp
Go to the documentation of this file.
1 //===- BufferizableOpInterface.cpp - Bufferizable Ops ---=----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
14 #include "mlir/IR/AsmState.h"
16 #include "mlir/IR/BuiltinOps.h"
17 #include "mlir/IR/Operation.h"
18 #include "mlir/IR/TypeUtilities.h"
19 #include "mlir/IR/Value.h"
21 #include "llvm/Support/Debug.h"
22 
23 //===----------------------------------------------------------------------===//
24 // BufferizableOpInterface
25 //===----------------------------------------------------------------------===//
26 
27 namespace mlir {
28 namespace bufferization {
29 
30 #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
31 
32 } // namespace bufferization
33 } // namespace mlir
34 
36 
37 #define DEBUG_TYPE "bufferizable-op-interface"
38 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
39 #define LDBG(X) LLVM_DEBUG(DBGS() << (X))
40 
41 using namespace mlir;
42 using namespace bufferization;
43 
45  if (auto opResult = value.dyn_cast<OpResult>())
46  return opResult.getDefiningOp();
47  return value.cast<BlockArgument>().getOwner()->getParentOp();
48 }
49 
51 #ifndef NDEBUG
52  auto bufferizableOp = opResult.getDefiningOp<BufferizableOpInterface>();
53  assert(bufferizableOp && bufferizableOp.bufferizesToAllocation(opResult) &&
54  "expected op that bufferizes to an allocation");
55 #endif // NDEBUG
56 
57  Operation *op = opResult.getDefiningOp();
58  // If there is no 'escape' attribute, we cannot say for sure.
59  if (!op->hasAttr(BufferizationDialect::kEscapeAttrName))
60  return false;
61  auto attr =
62  op->getAttrOfType<ArrayAttr>(BufferizationDialect::kEscapeAttrName);
63  return !attr[opResult.getResultNumber()].cast<BoolAttr>().getValue();
64 }
65 
66 /// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
67 /// shaped value is copied. Otherwise, a tensor with undefined contents is
68 /// allocated.
70  OpBuilder &b, Location loc, Value shapedValue, bool escape,
71  const BufferizationOptions &options, bool copy) {
72  Value tensor;
73  if (shapedValue.getType().isa<RankedTensorType>()) {
74  tensor = shapedValue;
75  } else if (shapedValue.getType().isa<MemRefType>()) {
76  tensor = b.create<ToTensorOp>(loc, shapedValue);
77  } else {
78  llvm_unreachable("expected RankedTensorType or MemRefType");
79  }
80  RankedTensorType tensorType = tensor.getType().cast<RankedTensorType>();
81  SmallVector<Value> dynamicSizes;
82  if (!copy) {
83  // Compute the dynamic part of the shape.
84  // First try to query the shape via ReifyRankedShapedTypeOpInterface.
85  bool reifiedShapes = false;
86  if (shapedValue.getType().isa<RankedTensorType>() &&
87  shapedValue.isa<OpResult>()) {
88  if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
89  shapedValue.getDefiningOp())) {
90  ReifiedRankedShapedTypeDims resultDims;
91  if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
92  reifiedShapes = true;
93  auto &shape =
94  resultDims[shapedValue.cast<OpResult>().getResultNumber()];
95  for (const auto &dim : enumerate(tensorType.getShape()))
96  if (ShapedType::isDynamic(dim.value()))
97  dynamicSizes.push_back(shape[dim.index()]);
98  }
99  }
100  }
101 
102  // If the shape could not be reified, create DimOps.
103  if (!reifiedShapes)
104  populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
105  }
106 
107  // Create AllocTensorOp.
108  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
109  copy ? tensor : Value());
110  allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName,
111  b.getBoolArrayAttr({escape}));
112 
113  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
114  if (copy)
115  return allocTensorOp.getResult();
116  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
117  if (failed(copyBufferType))
118  return failure();
119  Attribute memorySpace = copyBufferType->getMemorySpace();
120  if (!memorySpace)
121  memorySpace = b.getI64IntegerAttr(0);
122  allocTensorOp.setMemorySpaceAttr(memorySpace);
123  return allocTensorOp.getResult();
124 }
125 
126 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
127  RewriterBase &rewriter, const AnalysisState &state) {
128  OpBuilder::InsertionGuard g(rewriter);
129  Operation *op = getOperation();
130  SmallVector<OpOperand *> outOfPlaceOpOperands;
131  DenseSet<OpOperand *> copiedOpOperands;
132  DenseSet<OpOperand *> escapingOpOperandCopies;
133  SmallVector<OpResult> outOfPlaceOpResults;
134  DenseSet<OpResult> copiedOpResults;
135  DenseSet<OpResult> escapingOpResultCopies;
136 
137  // Find all out-of-place OpOperands.
138  for (OpOperand &opOperand : op->getOpOperands()) {
139  Type operandType = opOperand.get().getType();
140  if (!operandType.isa<TensorType>())
141  continue;
142  if (state.isInPlace(opOperand))
143  continue;
144  if (operandType.isa<UnrankedTensorType>())
145  return op->emitError("copies of unranked tensors are not supported");
146 
147  SmallVector<OpResult> aliasingOpResults =
148  state.getAliasingOpResult(opOperand);
149  // Is the result yielded from a block? Or are deallocations turned off
150  // entirely? In either case, mark the allocation as "escaping", so that it
151  // will not be deallocated.
152  bool escape = !state.getOptions().createDeallocs ||
153  llvm::any_of(aliasingOpResults, [&](Value v) {
154  return state.isTensorYielded(v);
155  });
156 
157  if (aliasingOpResults.size() == 1 &&
158  !state.bufferizesToMemoryWrite(opOperand) &&
159  state.getAliasingOpOperand(aliasingOpResults.front()).size() == 1) {
160  // The op itself does not write but may create exactly one alias. Instead
161  // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
162  // be smaller than the OpOperand (e.g., in the case of an extract_slice,
163  // where the result is usually a smaller part of the source).
164  outOfPlaceOpResults.push_back(aliasingOpResults.front());
165  if (!state.canOmitTensorCopy(opOperand))
166  copiedOpResults.insert(aliasingOpResults.front());
167  if (escape)
168  escapingOpResultCopies.insert(aliasingOpResults.front());
169  } else {
170  // In all other cases, make a copy of the OpOperand.
171  outOfPlaceOpOperands.push_back(&opOperand);
172  if (!state.canOmitTensorCopy(opOperand))
173  copiedOpOperands.insert(&opOperand);
174  if (escape)
175  escapingOpOperandCopies.insert(&opOperand);
176  }
177  }
178 
179  // Insert copies of OpOperands.
180  rewriter.setInsertionPoint(op);
181  for (OpOperand *opOperand : outOfPlaceOpOperands) {
183  rewriter, op->getLoc(), opOperand->get(),
184  escapingOpOperandCopies.contains(opOperand), state.getOptions(),
185  copiedOpOperands.contains(opOperand));
186  if (failed(copy))
187  return failure();
188  rewriter.updateRootInPlace(op, [&]() { opOperand->set(*copy); });
189  }
190 
191  // Insert copies of OpResults.
192  rewriter.setInsertionPointAfter(op);
193  for (OpResult opResult : outOfPlaceOpResults) {
195  rewriter, op->getLoc(), opResult,
196  escapingOpResultCopies.contains(opResult), state.getOptions(),
197  copiedOpResults.count(opResult));
198  if (failed(copy))
199  return failure();
200  SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
201  opResult.getUses(), [](OpOperand &use) { return &use; }));
202  for (OpOperand *use : uses) {
203  // Do not update the alloc_tensor op that we just created.
204  if (use->getOwner() != copy->getDefiningOp())
205  rewriter.updateRootInPlace(use->getOwner(), [&]() { use->set(*copy); });
206  }
207  }
208 
209  return success();
210 }
211 
213  OpResult opResult, const BufferizationOptions &options) {
214  Operation *op = opResult.getOwner();
215  assert(options.dynCastBufferizableOp(op).bufferizesToAllocation(opResult) &&
216  "expected that op allocates");
217 
218  AnalysisState analysisState(options);
219  if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) {
220  // AllocTensorOp has one result.
221  ArrayAttr escapeAttr =
222  op->getAttr(BufferizationDialect::kEscapeAttrName).cast<ArrayAttr>();
223  return !escapeAttr[0].cast<BoolAttr>().getValue();
224  }
225 
226  // No "escape" annotation found.
227  if (options.createDeallocs) {
228  // Perform an ad-hoc analysis.
229  return !analysisState.isTensorYielded(opResult);
230  }
231 
232  return false;
233 }
234 
235 //===----------------------------------------------------------------------===//
236 // OpFilter
237 //===----------------------------------------------------------------------===//
238 
240  // All other ops: Allow/disallow according to filter.
241  bool isAllowed = !hasAllowRule();
242  for (const Entry &entry : entries) {
243  bool filterResult = entry.fn(op);
244  switch (entry.type) {
245  case Entry::ALLOW:
246  isAllowed |= filterResult;
247  break;
248  case Entry::DENY:
249  if (filterResult)
250  // DENY filter matches. This op is no allowed. (Even if other ALLOW
251  // filters may match.)
252  return false;
253  };
254  }
255  return isAllowed;
256 }
257 
258 //===----------------------------------------------------------------------===//
259 // BufferizationOptions
260 //===----------------------------------------------------------------------===//
261 
262 /// Default unknown type converter: Use a fully dynamic layout map.
263 static BaseMemRefType
265  const BufferizationOptions &options) {
266  return getMemRefTypeWithFullyDynamicLayout(value.getType().cast<TensorType>(),
267  memorySpace);
268 }
269 
270 // Default constructor for BufferizationOptions.
272  : unknownTypeConverterFn(defaultUnknownTypeConverter) {}
273 
275  // Special case: If function boundary bufferization is deactivated, do not
276  // allow ops that belong to the `func` dialect.
277  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
278  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
279  return false;
280 
281  return opFilter.isOpAllowed(op);
282 }
283 
284 BufferizableOpInterface
286  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
287  if (!bufferizableOp)
288  return nullptr;
289  if (!isOpAllowed(op))
290  return nullptr;
291  return bufferizableOp;
292 }
293 
294 BufferizableOpInterface
296  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
297  if (isOpAllowed(bufferizableOp.getOperation()))
298  return bufferizableOp;
299  return nullptr;
300 }
301 
302 //===----------------------------------------------------------------------===//
303 // Helper functions for BufferizableOpInterface
304 //===----------------------------------------------------------------------===//
305 
307  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
308  b.setInsertionPointToStart(bbArg.getOwner());
309  } else {
310  b.setInsertionPointAfter(value.getDefiningOp());
311  }
312 }
313 
314 /// Determine which OpOperand* will alias with `result` if the op is bufferized
315 /// in place. Return an empty vector if the op is not bufferizable.
318  if (Operation *op = result.getDefiningOp())
319  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
320  return bufferizableOp.getAliasingOpOperand(result, *this);
321  return {};
322 }
323 
324 /// Determine which OpResult will alias with `opOperand` if the op is bufferized
325 /// in place. Return an empty vector if the op is not bufferizable.
328  if (auto bufferizableOp =
329  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
330  return bufferizableOp.getAliasingOpResult(opOperand, *this);
331  return {};
332 }
333 
334 /// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
335 /// op is not bufferizable.
337  if (auto bufferizableOp =
338  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
339  return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
340 
341  // Unknown op that returns a tensor. The inplace analysis does not support it.
342  // Conservatively return true.
343  return true;
344 }
345 
346 /// Return true if `opOperand` bufferizes to a memory write. Return
347 /// `true` if the op is not bufferizable.
349  if (auto bufferizableOp =
350  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
351  return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
352 
353  // Unknown op that returns a tensor. The inplace analysis does not support it.
354  // Conservatively return true.
355  return true;
356 }
357 
358 /// Return true if `opOperand` does neither read nor write but bufferizes to an
359 /// alias. Return false if the op is not bufferizable.
361  if (auto bufferizableOp =
362  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
363  return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
364 
365  // Unknown op that returns a tensor. The inplace analysis does not support it.
366  // Conservatively return false.
367  return false;
368 }
369 
370 /// Return true if the given value is read by an op that bufferizes to a memory
371 /// read. Also takes into account ops that create an alias but do not read by
372 /// themselves (e.g., ExtractSliceOp).
374  assert(value.getType().isa<TensorType>() && "expected TensorType");
375  SmallVector<OpOperand *> workingSet;
376  for (OpOperand &use : value.getUses())
377  workingSet.push_back(&use);
378 
379  while (!workingSet.empty()) {
380  OpOperand *uMaybeReading = workingSet.pop_back_val();
381  // Skip over all ops that neither read nor write (but create an alias).
382  if (bufferizesToAliasOnly(*uMaybeReading))
383  for (OpResult opResult : getAliasingOpResult(*uMaybeReading))
384  for (OpOperand &use : opResult.getUses())
385  workingSet.push_back(&use);
386  if (bufferizesToMemoryRead(*uMaybeReading))
387  return true;
388  }
389 
390  return false;
391 }
392 
393 // Starting from `value`, follow the use-def chain in reverse, always selecting
394 // the aliasing OpOperands. Find and return Values for which `condition`
395 // evaluates to true. OpOperands of such matching Values are not traversed any
396 // further.
398  Value value, llvm::function_ref<bool(Value)> condition,
399  bool followEquivalentOnly) const {
400  llvm::SetVector<Value> result, workingSet;
401  workingSet.insert(value);
402 
403  while (!workingSet.empty()) {
404  Value value = workingSet.pop_back_val();
405  if (condition(value) || value.isa<BlockArgument>()) {
406  result.insert(value);
407  continue;
408  }
409 
410  OpResult opResult = value.cast<OpResult>();
411  BufferizableOpInterface bufferizableOp =
412  options.dynCastBufferizableOp(opResult.getDefiningOp());
413  SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
414 
415  // Stop iterating in either one of these cases:
416  // * The current op is not bufferizable or excluded in the filter.
417  // * There are no OpOperands to follow.
418  // * There is an OpOperand, but it is not an equivalent tensor (only if
419  // `followEquivalentOnly` is set).
420  if (!bufferizableOp || opOperands.empty() ||
421  (followEquivalentOnly &&
422  bufferizableOp.bufferRelation(opResult, *this) !=
424  result.insert(value);
425  continue;
426  }
427 
428  for (OpOperand *o : opOperands)
429  workingSet.insert(o->get());
430  }
431 
432  return result;
433 }
434 
435 // Find the Values of the last preceding write of a given Value.
439  Operation *op = value.getDefiningOp();
440  if (!op)
441  return true;
442  auto bufferizableOp = options.dynCastBufferizableOp(op);
443  if (!bufferizableOp)
444  return true;
445  return bufferizableOp.isMemoryWrite(value.cast<OpResult>(), *this);
446  });
447 }
448 
451 
453  : options(options), type(type) {
455  options.stateInitializers)
456  fn(*this);
457 }
458 
460  // Do not copy if the tensor has undefined contents.
461  if (hasUndefinedContents(&opOperand))
462  return true;
463 
464  // Do not copy if the buffer of the tensor is entirely overwritten (with
465  // values that do not depend on the old tensor).
466  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
467  return true;
468 
469  // Do not copy if the tensor is never read.
470  SmallVector<OpResult> aliasingOpResults = getAliasingOpResult(opOperand);
471  if (!bufferizesToMemoryRead(opOperand) &&
472  llvm::none_of(aliasingOpResults,
473  [&](OpResult opResult) { return isValueRead(opResult); }))
474  return true;
475 
476  // Default: Cannot omit the copy.
477  return false;
478 }
479 
480 bool AnalysisState::isInPlace(OpOperand &opOperand) const {
481  // ToMemrefOps are always in-place.
482  if (isa<ToMemrefOp>(opOperand.getOwner()))
483  return true;
484 
485  // In the absence of analysis information, OpOperands that bufferize to a
486  // memory write are out-of-place, i.e., an alloc and copy is inserted.
487  return !bufferizesToMemoryWrite(opOperand);
488 }
489 
491  // In the absence of analysis information, we do not know if the values are
492  // equivalent. The conservative answer is "false".
493  return false;
494 }
495 
497  // In the absence of analysis information, we do not know if the values may be
498  // aliasing. The conservative answer is "true".
499  return true;
500 }
501 
503  // In the absence of analysis information, the conservative answer is "false".
504  return false;
505 }
506 
508  // In the absence of analysis information, the conservative answer is "true".
509  if (!tensor.getDefiningOp<AllocTensorOp>())
510  return true;
511 
512  // For AllocTensorOp results, we can do better: They do not alias with any
513  // preceding value, so we can follow SSA use-def chains and do a simple
514  // analysis.
515  SmallVector<OpOperand *> worklist;
516  for (OpOperand &use : tensor.getUses())
517  worklist.push_back(&use);
518 
519  while (!worklist.empty()) {
520  OpOperand *operand = worklist.pop_back_val();
521  Operation *op = operand->getOwner();
522 
523  // If the op is not bufferizable, we can safely assume that the value is not
524  // yielded. (When bufferizing that op, it must handle such cases.)
525  if (!options.dynCastBufferizableOp(op))
526  continue;
527 
528  // We cannot analyze through ToMemrefOps, so we have to conservatively
529  // assume that the value is yielded.
530  if (isa<ToMemrefOp>(op))
531  return true;
532 
533  // Check if the op is returning/yielding.
534  if (isRegionReturnLike(op))
535  return true;
536 
537  // Add all aliasing OpResults to the worklist.
538  // Note: In the absence of detailed analysis information (e.g., there may be
539  // no function call analysis information), this `getAliasingOpResult` is
540  // conservative and may report additional OpResults as potentially aliasing.
541  for (OpResult opResult : getAliasingOpResult(*operand))
542  for (OpOperand &use : opResult.getUses())
543  worklist.push_back(&use);
544  }
545 
546  // No ReturnLike op found: The value is not yielded.
547  return false;
548 }
549 
550 // bufferization.to_memref is not allowed to change the rank.
551 static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
552 #ifndef NDEBUG
553  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
554  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
555  rankedTensorType.getRank()) &&
556  "to_memref would be invalid: mismatching ranks");
557 #endif
558 }
559 
561  const BufferizationOptions &options) {
562 #ifndef NDEBUG
563  auto tensorType = value.getType().dyn_cast<TensorType>();
564  assert(tensorType && "unexpected non-tensor type");
565 #endif // NDEBUG
566 
567  // Replace "%t = to_tensor %m" with %m.
568  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
569  return toTensorOp.getMemref();
570 
571  // Insert to_memref op.
572  OpBuilder::InsertionGuard g(rewriter);
573  setInsertionPointAfter(rewriter, value);
575  if (failed(memrefType))
576  return failure();
577  ensureToMemrefOpIsValid(value, *memrefType);
578  return rewriter
579  .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
580  .getResult();
581 }
582 
585  const DenseMap<Value, BaseMemRefType> &fixedTypes) {
586  assert(value.getType().isa<TensorType>() && "expected tensor type");
587 
588  // No further analysis is possible for a block argument.
589  if (value.isa<BlockArgument>())
591 
592  // Value is an OpResult.
594  auto opResult = value.cast<OpResult>();
595  auto bufferizableOp = cast<BufferizableOpInterface>(op);
596  AnalysisState state(options);
597  auto aliasingOperands = bufferizableOp.getAliasingOpOperand(opResult, state);
598  if (!aliasingOperands.empty() &&
599  bufferizableOp.bufferRelation(opResult, state) ==
601  // If the OpResult has an equivalent OpOperand, both OpResult and
602  // OpOperand bufferize to the exact same buffer type.
603  Value equivalentOperand = aliasingOperands.front()->get();
604  return getBufferType(equivalentOperand, options, fixedTypes);
605  }
606 
607  // If we do not know the memory space and there is no default memory space,
608  // report a failure.
609  if (!options.defaultMemorySpace.has_value())
610  return op->emitError("could not infer memory space");
611 
612  return getMemRefType(value, options, /*layout=*/{},
613  *options.defaultMemorySpace);
614 }
615 
616 /// Return the buffer type for a given Value (tensor) after bufferization.
620  return getBufferType(value, options, fixedTypes);
621 }
622 
623 /// Return the buffer type for a given Value (tensor) after bufferization.
626  const DenseMap<Value, BaseMemRefType> &fixedTypes) {
627  assert(value.getType().isa<TensorType>() && "unexpected non-tensor type");
628 
629  // If the `value` is in `fixedTypes`, return the mapped type.
630  const auto &it = fixedTypes.find(value);
631  if (it != fixedTypes.end())
632  return it->second;
633 
634  // Try querying BufferizableOpInterface.
636  auto bufferizableOp = options.dynCastBufferizableOp(op);
637  if (bufferizableOp)
638  return bufferizableOp.getBufferType(value, options, fixedTypes);
639 
640  // Op is not bufferizable.
641  if (!options.defaultMemorySpace.has_value())
642  return op->emitError("could not infer memory space");
643 
644  return getMemRefType(value, options, /*layout=*/{},
645  *options.defaultMemorySpace);
646 }
647 
649  Operation *op,
650  ValueRange values) {
651  assert(values.size() == op->getNumResults() &&
652  "expected one value per OpResult");
653  OpBuilder::InsertionGuard g(rewriter);
654 
655  // Replace all OpResults with the given values.
656  SmallVector<Value> replacements;
657  for (OpResult opResult : op->getOpResults()) {
658  Value replacement = values[opResult.getResultNumber()];
659  if (opResult.getType().isa<TensorType>()) {
660  // The OpResult is a tensor. Such values are replaced with memrefs during
661  // bufferization.
662  assert((replacement.getType().isa<MemRefType>() ||
663  replacement.getType().isa<UnrankedMemRefType>()) &&
664  "tensor op result should be replaced with a memref value");
665  // The existing uses of the OpResult still expect a tensor. Insert a
666  // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
667  // loose all of its users and eventually DCE away.
668  rewriter.setInsertionPointAfter(op);
669  replacement = rewriter.create<bufferization::ToTensorOp>(
670  replacement.getLoc(), replacement);
671  }
672  replacements.push_back(replacement);
673  }
674 
675  rewriter.replaceOp(op, replacements);
676 }
677 
678 //===----------------------------------------------------------------------===//
679 // Bufferization-specific scoped alloc/dealloc insertion support.
680 //===----------------------------------------------------------------------===//
681 
682 /// Create a memref allocation with the given type and dynamic extents.
684  MemRefType type,
685  ValueRange dynShape) const {
686  if (allocationFn)
687  return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);
688 
689  // Default bufferallocation via AllocOp.
690  if (bufferAlignment != 0)
691  return b
692  .create<memref::AllocOp>(loc, type, dynShape,
694  .getResult();
695  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
696 }
697 
698 /// Creates a memref deallocation. The given memref buffer must have been
699 /// allocated using `createAlloc`.
701  Value allocatedBuffer) const {
702  if (deallocationFn)
703  return (*deallocationFn)(b, loc, allocatedBuffer);
704 
705  // Default buffer deallocation via DeallocOp.
706  b.create<memref::DeallocOp>(loc, allocatedBuffer);
707  return success();
708 }
709 
710 /// Create a memory copy between two memref buffers.
712  Value from, Value to) const {
713  if (memCpyFn)
714  return (*memCpyFn)(b, loc, from, to);
715 
716  b.create<memref::CopyOp>(loc, from, to);
717  return success();
718 }
719 
720 //===----------------------------------------------------------------------===//
721 // Bufferization-specific BlockAndValueMapping support with debugging.
722 //===----------------------------------------------------------------------===//
723 
725  auto bbArg = value.dyn_cast<BlockArgument>();
726  if (!bbArg)
727  return false;
728  return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
729 }
730 
733  MemRefLayoutAttrInterface layout,
734  Attribute memorySpace) {
735  auto tensorType = value.getType().cast<TensorType>();
736 
737  // Case 1: Unranked memref type.
738  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
739  assert(!layout && "UnrankedTensorType cannot have a layout map");
740  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
741  memorySpace);
742  }
743 
744  // Case 2: Ranked memref type with specified layout.
745  auto rankedTensorType = tensorType.cast<RankedTensorType>();
746  if (layout) {
747  return MemRefType::get(rankedTensorType.getShape(),
748  rankedTensorType.getElementType(), layout,
749  memorySpace);
750  }
751 
752  return options.unknownTypeConverterFn(value, memorySpace, options);
753 }
754 
757  Attribute memorySpace) {
758  // Case 1: Unranked memref type.
759  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
760  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
761  memorySpace);
762  }
763 
764  // Case 2: Ranked memref type.
765  auto rankedTensorType = tensorType.cast<RankedTensorType>();
766  int64_t dynamicOffset = ShapedType::kDynamic;
767  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
768  ShapedType::kDynamic);
769  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
770  dynamicOffset, dynamicStrides);
771  return MemRefType::get(rankedTensorType.getShape(),
772  rankedTensorType.getElementType(), stridedLayout,
773  memorySpace);
774 }
775 
776 /// Return a MemRef type with a static identity layout (i.e., no layout map). If
777 /// the given tensor type is unranked, return an unranked MemRef type.
780  Attribute memorySpace) {
781  // Case 1: Unranked memref type.
782  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
783  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
784  memorySpace);
785  }
786 
787  // Case 2: Ranked memref type.
788  auto rankedTensorType = tensorType.cast<RankedTensorType>();
789  MemRefLayoutAttrInterface layout = {};
790  return MemRefType::get(rankedTensorType.getShape(),
791  rankedTensorType.getElementType(), layout,
792  memorySpace);
793 }
794 
796  BufferizableOpInterface bufferizableOp, unsigned index) {
797  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
798  auto regionInterface =
799  dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
800  if (!regionInterface)
801  return false;
802  return regionInterface.isRepetitiveRegion(index);
803 }
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType)
static void setInsertionPointAfter(OpBuilder &b, Value value)
static BaseMemRefType defaultUnknownTypeConverter(Value value, Attribute memorySpace, const BufferizationOptions &options)
Default unknown type converter: Use a fully dynamic layout map.
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static constexpr const bool value
static llvm::ManagedStatic< PassManagerOptions > options
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
Definition: TypeID.h:263
Base class for generic analysis states.
Attributes are known-constant values of operations.
Definition: Attributes.h:25
U cast() const
Definition: Attributes.h:137
This class provides a shared interface for ranked and unranked memref types.
Definition: BuiltinTypes.h:114
This class represents an argument of a Block.
Definition: Value.h:296
Special case of IntegerAttr to represent boolean integers, i.e., signless i1 integers.
IntegerAttr getI64IntegerAttr(int64_t value)
Definition: Builders.cpp:113
ArrayAttr getBoolArrayAttr(ArrayRef< bool > values)
Definition: Builders.cpp:251
This class provides support for representing a failure result, or a valid value of type T.
Definition: LogicalResult.h:78
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:64
RAII guard to reset the insertion point of the builder when destroyed.
Definition: Builders.h:300
This class helps build Operations.
Definition: Builders.h:198
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:383
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
Definition: Builders.h:350
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:422
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent insertions to appear directly after that operation.
Definition: Builders.h:364
This class represents an operand of an operation.
Definition: Value.h:247
This is a value defined by a result of an operation.
Definition: Value.h:442
Operation * getOwner() const
Returns the operation that owns this result.
Definition: Value.h:451
unsigned getResultNumber() const
Returns the number of this result.
Definition: Value.h:454
Operation is a basic unit of execution within MLIR.
Definition: Operation.h:31
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loaded.
Definition: Operation.h:151
AttrClass getAttrOfType(StringAttr name)
Definition: Operation.h:375
Attribute getAttr(StringAttr name)
Return the specified attribute if present, null otherwise.
Definition: Operation.h:371
bool hasAttr(StringAttr name)
Return true if the operation has an attribute with the provided name, false otherwise.
Definition: Operation.h:385
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:154
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers that may be listening.
Definition: Operation.cpp:225
void setAttr(StringAttr name, Attribute value)
If an attribute exists with the specified name, change it to the new value.
Definition: Operation.h:395
MutableArrayRef< OpOperand > getOpOperands()
Definition: Operation.h:300
result_range getOpResults()
Definition: Operation.h:337
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:321
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
Definition: PatternMatch.h:398
virtual void replaceOp(Operation *op, ValueRange newValues)
This method replaces the results of the operation with the specified list of values.
void updateRootInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around a root update of an operation.
Definition: PatternMatch.h:499
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and UnrankedTensorType.
Definition: BuiltinTypes.h:78
This class provides an efficient unique identifier for a specific C++ type.
Definition: TypeID.h:104
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
Definition: Types.h:74
U cast() const
Definition: Types.h:280
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
Definition: Types.cpp:19
U dyn_cast() const
Definition: Types.h:270
bool isa() const
Definition: Types.h:260
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:349
This class represents an instance of an SSA value in the MLIR system, representing a computable value that has a type and a set of users.
Definition: Value.h:85
bool isa() const
Definition: Value.h:90
Type getType() const
Return the type of this value.
Definition: Value.h:114
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Definition: Value.h:193
U cast() const
Definition: Value.h:105
Location getLoc() const
Return the location of this value.
Definition: Value.cpp:26
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:20
AnalysisState provides a variety of helper functions for dealing with tensor values.
bool isValueRead(Value value) const
Return true if the given value is read by an op that bufferizes to a memory read.
virtual bool areAliasingBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 may bufferize to aliasing buffers.
virtual bool hasUndefinedContents(OpOperand *opOperand) const
Return true if the given tensor has undefined contents.
SetVector< Value > findValueInReverseUseDefChain(Value value, llvm::function_ref< bool(Value)> condition, bool followEquivalentOnly=false) const
Starting from value, follow the use-def chain in reverse, always selecting the aliasing OpOperands.
bool canOmitTensorCopy(OpOperand &opOperand) const
Return true if a copy can always be avoided when allocating a new tensor for the given OpOperand.
bool bufferizesToMemoryWrite(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory write.
virtual bool isInPlace(OpOperand &opOperand) const
Return true if the given OpOperand has been decided to bufferize inplace.
bool bufferizesToAliasOnly(OpOperand &opOperand) const
Return true if opOperand does neither read nor write but bufferizes to an alias.
AnalysisState(const BufferizationOptions &options)
const BufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
SetVector< Value > findLastPrecedingWrite(Value value) const
Find the Values of the last preceding write of a given Value.
virtual bool isTensorYielded(Value tensor) const
Return true if the given tensor (or an aliasing tensor) is yielded from the containing block.
bool bufferizesToMemoryRead(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory read.
virtual bool areEquivalentBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 bufferize to equivalent buffers.
SmallVector< OpOperand * > getAliasingOpOperand(OpResult result) const
Determine which OpOperand* will alias with result if the op is bufferized in place.
SmallVector< OpResult > getAliasingOpResult(OpOperand &opOperand) const
Determine which OpResult will alias with opOperand if the op is bufferized in place.
bool isOpAllowed(Operation *op) const
Return whether the op is allowed or not.
Operation * getOwner() const
Return the owner of this operand.
Definition: UseDefLists.h:40
FailureOr< BaseMemRefType > defaultGetBufferType(Value value, const BufferizationOptions &options, const DenseMap< Value, BaseMemRefType > &fixedTypes)
This is the default implementation of BufferizableOpInterface::getBufferType.
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp, unsigned index)
This is the default implementation of BufferizableOpInterface::isRepetitiveRegion.
void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, ValueRange values)
Replace an op with replacement values.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
Operation * getOwnerOfValue(Value value)
Return the owner of the given value.
bool allocationDoesNotEscape(OpResult opResult)
Return true if the allocation of the given op is guaranteed to not escape the containing block.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, MemRefLayoutAttrInterface layout={}, Attribute memorySpace=nullptr)
Return a MemRefType to which the type of the given value can be bufferized.
bool shouldDeallocateOpResult(OpResult opResult, const BufferizationOptions &options)
Return true if the buffer of given OpResult should be deallocated.
FailureOr< Value > allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, bool escape, const BufferizationOptions &options, bool copy=true)
Create an AllocTensorOp for the given shaped value (memref or tensor).
bool isFunctionArgument(Value value)
Return true if the given value is a BlockArgument of a func::FuncOp.
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
FailureOr< Value > getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options)
Lookup the buffer for the given value.
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the given shaped value.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:230
Include the generated interface declarations.
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:62
bool succeeded(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a success value.
Definition: LogicalResult.h:68
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:56
bool isRegionReturnLike(Operation *operation)
Returns true if the given operation is either annotated with the ReturnLike trait or implements the R...
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value.
Definition: LogicalResult.h:72
This class represents an efficient way to signal success or failure.
Definition: LogicalResult.h:26
Options for BufferizableOpInterface-based bufferization.
Optional< AllocationFn > allocationFn
Helper functions for allocation, deallocation, memory copying.
std::function< void(AnalysisState &)> AnalysisStateInitFn
Initializer function for analysis state.
BufferizableOpInterface dynCastBufferizableOp(Operation *op) const
Try to cast the given op to BufferizableOpInterface if the op is allow listed.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
bool isOpAllowed(Operation *op) const
Return true if the given op should be bufferized.
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
LogicalResult createDealloc(OpBuilder &b, Location loc, Value allocatedBuffer) const
Creates a memref deallocation.
FailureOr< Value > createAlloc(OpBuilder &b, Location loc, MemRefType type, ValueRange dynShape) const
Create a memref allocation with the given type and dynamic extents.
LogicalResult createMemCpy(OpBuilder &b, Location loc, Value from, Value to) const
Creates a memcpy between two given buffers.
SmallVector< AnalysisStateInitFn > stateInitializers
Initializer functions for analysis state.