MLIR  17.0.0git
BufferizableOpInterface.cpp
Go to the documentation of this file.
1 //===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
14 #include "mlir/IR/AsmState.h"
15 #include "mlir/IR/BuiltinOps.h"
16 #include "mlir/IR/IRMapping.h"
17 #include "mlir/IR/Operation.h"
18 #include "mlir/IR/TypeUtilities.h"
19 #include "mlir/IR/Value.h"
21 #include "llvm/Support/Debug.h"
22 
23 //===----------------------------------------------------------------------===//
24 // BufferizableOpInterface
25 //===----------------------------------------------------------------------===//
26 
27 namespace mlir {
28 namespace bufferization {
29 
30 #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
31 
32 } // namespace bufferization
33 } // namespace mlir
34 
36 
37 #define DEBUG_TYPE "bufferizable-op-interface"
38 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
39 #define LDBG(X) LLVM_DEBUG(DBGS() << (X))
40 
41 using namespace mlir;
42 using namespace bufferization;
43 
44 static bool isRepetitiveRegion(Region *region,
46  Operation *op = region->getParentOp();
47  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
48  if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
49  return true;
50  return false;
51 }
52 
55  if (!op->getBlock())
56  return nullptr;
58 }
59 
61  Value value, const BufferizationOptions &options) {
62  Region *region = value.getParentRegion();
63  while (region) {
64  if (isRepetitiveRegion(region, options))
65  return region;
66  region = region->getParentRegion();
67  }
68  return nullptr;
69 }
70 
72  Block *block, const BufferizationOptions &options) {
73  Region *region = block->getParent();
74  Operation *op = nullptr;
75  do {
76  op = region->getParentOp();
77  if (isRepetitiveRegion(region, options))
78  return region;
79  } while ((region = op->getParentRegion()));
80  return nullptr;
81 }
82 
84  Region *region, const BufferizationOptions &options) {
85  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
86  while ((region = region->getParentRegion())) {
87  if (isRepetitiveRegion(region, options))
88  break;
89  }
90  return region;
91 }
92 
94  if (auto opResult = value.dyn_cast<OpResult>())
95  return opResult.getDefiningOp();
96  return value.cast<BlockArgument>().getOwner()->getParentOp();
97 }
98 
100 #ifndef NDEBUG
101  auto bufferizableOp = opResult.getDefiningOp<BufferizableOpInterface>();
102  assert(bufferizableOp && bufferizableOp.bufferizesToAllocation(opResult) &&
103  "expected op that bufferizes to an allocation");
104 #endif // NDEBUG
105 
106  Operation *op = opResult.getDefiningOp();
107  // If there is no 'escape' attribute, we cannot say for sure.
108  if (!op->hasAttr(BufferizationDialect::kEscapeAttrName))
109  return false;
110  auto attr =
111  op->getAttrOfType<ArrayAttr>(BufferizationDialect::kEscapeAttrName);
112  return !attr[opResult.getResultNumber()].cast<BoolAttr>().getValue();
113 }
114 
115 /// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
116 /// shaped value is copied. Otherwise, a tensor with undefined contents is
117 /// allocated.
119  OpBuilder &b, Location loc, Value shapedValue, bool escape,
120  const BufferizationOptions &options, bool copy) {
121  Value tensor;
122  if (shapedValue.getType().isa<RankedTensorType>()) {
123  tensor = shapedValue;
124  } else if (shapedValue.getType().isa<MemRefType>()) {
125  tensor = b.create<ToTensorOp>(loc, shapedValue);
126  } else if (shapedValue.getType().isa<UnrankedTensorType>() ||
127  shapedValue.getType().isa<UnrankedMemRefType>()) {
128  return getOwnerOfValue(shapedValue)
129  ->emitError("copying of unranked tensors is not implemented");
130  } else {
131  llvm_unreachable("expected RankedTensorType or MemRefType");
132  }
133  RankedTensorType tensorType = tensor.getType().cast<RankedTensorType>();
134  SmallVector<Value> dynamicSizes;
135  if (!copy) {
136  // Compute the dynamic part of the shape.
137  // First try to query the shape via ReifyRankedShapedTypeOpInterface.
138  bool reifiedShapes = false;
139  if (shapedValue.getType().isa<RankedTensorType>() &&
140  shapedValue.isa<OpResult>()) {
141  ReifiedRankedShapedTypeDims resultDims;
142  if (succeeded(
143  reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
144  reifiedShapes = true;
145  auto &shape =
146  resultDims[shapedValue.cast<OpResult>().getResultNumber()];
147  for (const auto &dim : enumerate(tensorType.getShape()))
148  if (ShapedType::isDynamic(dim.value()))
149  dynamicSizes.push_back(shape[dim.index()].get<Value>());
150  }
151  }
152 
153  // If the shape could not be reified, create DimOps.
154  if (!reifiedShapes)
155  populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
156  }
157 
158  // Create AllocTensorOp.
159  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
160  copy ? tensor : Value());
161  allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName,
162  b.getBoolArrayAttr({escape}));
163 
164  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
165  if (copy)
166  return allocTensorOp.getResult();
167  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
168  if (failed(copyBufferType))
169  return failure();
170  Attribute memorySpace = copyBufferType->getMemorySpace();
171  if (!memorySpace)
172  memorySpace = b.getI64IntegerAttr(0);
173  allocTensorOp.setMemorySpaceAttr(memorySpace);
174  return allocTensorOp.getResult();
175 }
176 
177 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
178  RewriterBase &rewriter, const AnalysisState &state) {
179  OpBuilder::InsertionGuard g(rewriter);
180  Operation *op = getOperation();
181  SmallVector<OpOperand *> outOfPlaceOpOperands;
182  DenseSet<OpOperand *> copiedOpOperands;
183  DenseSet<OpOperand *> escapingOpOperandCopies;
184  SmallVector<OpResult> outOfPlaceOpResults;
185  DenseSet<OpResult> copiedOpResults;
186  DenseSet<OpResult> escapingOpResultCopies;
187 
188  // Find all out-of-place OpOperands.
189  for (OpOperand &opOperand : op->getOpOperands()) {
190  Type operandType = opOperand.get().getType();
191  if (!operandType.isa<TensorType>())
192  continue;
193  if (state.isInPlace(opOperand))
194  continue;
195  if (operandType.isa<UnrankedTensorType>())
196  return op->emitError("copying of unranked tensors is not implemented");
197 
198  AliasingOpResultList aliasingOpResults =
199  state.getAliasingOpResults(opOperand);
200  // Is the result yielded from a block? Or are deallocations turned off
201  // entirely? In either case, mark the allocation as "escaping", so that it
202  // will not be deallocated.
203  bool escape = !state.getOptions().createDeallocs ||
204  llvm::any_of(aliasingOpResults, [&](AliasingOpResult a) {
205  return state.isTensorYielded(a.opResult);
206  });
207 
208  if (aliasingOpResults.getNumAliases() == 1 &&
209  !state.bufferizesToMemoryWrite(opOperand) &&
210  state.getAliasingOpOperands(aliasingOpResults.getAliases()[0].opResult)
211  .getNumAliases() == 1 &&
212  !aliasingOpResults.getAliases()[0]
213  .opResult.getType()
214  .isa<UnrankedTensorType>()) {
215  // The op itself does not write but may create exactly one alias. Instead
216  // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
217  // be smaller than the OpOperand (e.g., in the case of an extract_slice,
218  // where the result is usually a smaller part of the source). Do not apply
219  // this optimization if the OpResult is an unranked tensor (because those
220  // cannot be copied at the moment).
221  OpResult opResult = aliasingOpResults.getAliases()[0].opResult;
222  outOfPlaceOpResults.push_back(opResult);
223  if (!state.canOmitTensorCopy(opOperand))
224  copiedOpResults.insert(opResult);
225  if (escape)
226  escapingOpResultCopies.insert(opResult);
227  } else {
228  // In all other cases, make a copy of the OpOperand.
229  outOfPlaceOpOperands.push_back(&opOperand);
230  if (!state.canOmitTensorCopy(opOperand))
231  copiedOpOperands.insert(&opOperand);
232  if (escape)
233  escapingOpOperandCopies.insert(&opOperand);
234  }
235  }
236 
237  // Insert copies of OpOperands.
238  rewriter.setInsertionPoint(op);
239  for (OpOperand *opOperand : outOfPlaceOpOperands) {
241  rewriter, op->getLoc(), opOperand->get(),
242  escapingOpOperandCopies.contains(opOperand), state.getOptions(),
243  copiedOpOperands.contains(opOperand));
244  if (failed(copy))
245  return failure();
246  rewriter.updateRootInPlace(op, [&]() { opOperand->set(*copy); });
247  }
248 
249  // Insert copies of OpResults.
250  rewriter.setInsertionPointAfter(op);
251  for (OpResult opResult : outOfPlaceOpResults) {
253  rewriter, op->getLoc(), opResult,
254  escapingOpResultCopies.contains(opResult), state.getOptions(),
255  copiedOpResults.count(opResult));
256  if (failed(copy))
257  return failure();
258  SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
259  opResult.getUses(), [](OpOperand &use) { return &use; }));
260  for (OpOperand *use : uses) {
261  // Do not update the alloc_tensor op that we just created.
262  if (use->getOwner() == copy->getDefiningOp())
263  continue;
264  // tensor.dim ops may have been created to be used as alloc_tensor op
265  // dynamic extents. Do not update these either.
266  if (isa<tensor::DimOp>(use->getOwner()))
267  continue;
268  rewriter.updateRootInPlace(use->getOwner(), [&]() { use->set(*copy); });
269  }
270  }
271 
272  return success();
273 }
274 
276  OpResult opResult, const BufferizationOptions &options) {
277  Operation *op = opResult.getOwner();
278  assert(options.dynCastBufferizableOp(op).bufferizesToAllocation(opResult) &&
279  "expected that op allocates");
280 
281  AnalysisState analysisState(options);
282  if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) {
283  // AllocTensorOp has one result.
284  ArrayAttr escapeAttr =
285  op->getAttr(BufferizationDialect::kEscapeAttrName).cast<ArrayAttr>();
286  return !escapeAttr[0].cast<BoolAttr>().getValue();
287  }
288 
289  // No "escape" annotation found.
290  if (options.createDeallocs) {
291  // Perform an ad-hoc analysis.
292  return !analysisState.isTensorYielded(opResult);
293  }
294 
295  return false;
296 }
297 
298 //===----------------------------------------------------------------------===//
299 // OpFilter
300 //===----------------------------------------------------------------------===//
301 
303  // All other ops: Allow/disallow according to filter.
304  bool isAllowed = !hasAllowRule();
305  for (const Entry &entry : entries) {
306  bool filterResult = entry.fn(op);
307  switch (entry.type) {
308  case Entry::ALLOW:
309  isAllowed |= filterResult;
310  break;
311  case Entry::DENY:
312  if (filterResult)
313  // DENY filter matches. This op is not allowed. (Even if other ALLOW
314  // filters may match.)
315  return false;
316  };
317  }
318  return isAllowed;
319 }
320 
321 //===----------------------------------------------------------------------===//
322 // BufferizationOptions
323 //===----------------------------------------------------------------------===//
324 
325 /// Default unknown type converter: Use a fully dynamic layout map.
326 static BaseMemRefType
328  const BufferizationOptions &options) {
330  memorySpace);
331 }
332 
333 // Default constructor for BufferizationOptions.
335  : unknownTypeConverterFn(defaultUnknownTypeConverter) {}
336 
338  // Special case: If function boundary bufferization is deactivated, do not
339  // allow ops that belong to the `func` dialect.
340  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
341  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
342  return false;
343 
344  return opFilter.isOpAllowed(op);
345 }
346 
347 BufferizableOpInterface
349  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
350  if (!bufferizableOp)
351  return nullptr;
352  if (!isOpAllowed(op))
353  return nullptr;
354  return bufferizableOp;
355 }
356 
357 BufferizableOpInterface
359  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
360  if (isOpAllowed(bufferizableOp.getOperation()))
361  return bufferizableOp;
362  return nullptr;
363 }
364 
365 //===----------------------------------------------------------------------===//
366 // Helper functions for BufferizableOpInterface
367 //===----------------------------------------------------------------------===//
368 
369 static void setInsertionPointAfter(OpBuilder &b, Value value) {
370  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
371  b.setInsertionPointToStart(bbArg.getOwner());
372  } else {
374  }
375 }
376 
377 /// Determine which OpOperand* will alias with `opResult` if the op is
378 /// bufferized in place. Return all tensor OpOperand* if the op is not
379 /// bufferizable.
382  if (Operation *op = opResult.getDefiningOp())
383  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
384  return bufferizableOp.getAliasingOpOperands(opResult, *this);
385 
386  // The op is not bufferizable.
387  return detail::unknownGetAliasingOpOperands(opResult);
388 }
389 
390 /// Determine which OpResult will alias with `opOperand` if the op is bufferized
391 /// in place. Return all tensor OpResults if the op is not bufferizable.
394  if (auto bufferizableOp =
395  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
396  return bufferizableOp.getAliasingOpResults(opOperand, *this);
397 
398  // The op is not bufferizable.
399  return detail::unknownGetAliasingOpResults(opOperand);
400 }
401 
402 /// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
403 /// op is not bufferizable.
405  if (auto bufferizableOp =
406  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
407  return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
408 
409  // Unknown op that returns a tensor. The inplace analysis does not support it.
410  // Conservatively return true.
411  return true;
412 }
413 
414 /// Return true if `opOperand` bufferizes to a memory write. Return
415 /// `true` if the op is not bufferizable.
417  if (auto bufferizableOp =
418  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
419  return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
420 
421  // Unknown op that returns a tensor. The inplace analysis does not support it.
422  // Conservatively return true.
423  return true;
424 }
425 
426 /// Return true if `opOperand` does neither read nor write but bufferizes to an
427 /// alias. Return false if the op is not bufferizable.
429  if (auto bufferizableOp =
430  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
431  return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
432 
433  // Unknown op that returns a tensor. The inplace analysis does not support it.
434  // Conservatively return false.
435  return false;
436 }
437 
439  auto opResult = value.dyn_cast<OpResult>();
440  if (!opResult)
441  return true;
442  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
443  if (!bufferizableOp)
444  return true;
445  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
446 }
447 
448 /// Return true if the given value is read by an op that bufferizes to a memory
449 /// read. Also takes into account ops that create an alias but do not read by
450 /// themselves (e.g., ExtractSliceOp).
452  assert(value.getType().isa<TensorType>() && "expected TensorType");
453  SmallVector<OpOperand *> workingSet;
454  for (OpOperand &use : value.getUses())
455  workingSet.push_back(&use);
456 
457  while (!workingSet.empty()) {
458  OpOperand *uMaybeReading = workingSet.pop_back_val();
459  // Skip over all ops that neither read nor write (but create an alias).
460  if (bufferizesToAliasOnly(*uMaybeReading))
461  for (AliasingOpResult alias : getAliasingOpResults(*uMaybeReading))
462  for (OpOperand &use : alias.opResult.getUses())
463  workingSet.push_back(&use);
464  if (bufferizesToMemoryRead(*uMaybeReading))
465  return true;
466  }
467 
468  return false;
469 }
470 
471 // Starting from `value`, follow the use-def chain in reverse, always selecting
472 // the aliasing OpOperands. Find and return Values for which `condition`
473 // evaluates to true. OpOperands of such matching Values are not traversed any
474 // further.
476  Value value, llvm::function_ref<bool(Value)> condition,
477  bool followEquivalentOnly, bool alwaysIncludeLeaves) const {
478  llvm::SetVector<Value> result, workingSet;
479  workingSet.insert(value);
480 
481  while (!workingSet.empty()) {
482  Value value = workingSet.pop_back_val();
483  if (condition(value)) {
484  result.insert(value);
485  continue;
486  }
487 
488  if (value.isa<BlockArgument>()) {
489  if (alwaysIncludeLeaves)
490  result.insert(value);
491  continue;
492  }
493 
494  OpResult opResult = value.cast<OpResult>();
495  BufferizableOpInterface bufferizableOp =
496  options.dynCastBufferizableOp(opResult.getDefiningOp());
497  AliasingOpOperandList aliases = getAliasingOpOperands(opResult);
498 
499  // Stop iterating in either one of these cases:
500  // * The current op is not bufferizable or excluded in the filter.
501  // * There are no OpOperands to follow.
502  if (!bufferizableOp || aliases.getNumAliases() == 0) {
503  if (alwaysIncludeLeaves)
504  result.insert(value);
505  continue;
506  }
507 
508  for (AliasingOpOperand a : aliases) {
509  if (followEquivalentOnly && a.relation != BufferRelation::Equivalent) {
510  // Stop iterating if `followEquivalentOnly` is set but the alias is not
511  // equivalent.
512  if (alwaysIncludeLeaves)
513  result.insert(value);
514  } else {
515  workingSet.insert(a.opOperand->get());
516  }
517  }
518  }
519 
520  return result;
521 }
522 
523 // Find the values that define the contents of the given value.
526  value, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
527  /*followEquivalentOnly=*/false, /*alwaysIncludeLeaves=*/false);
528 }
529 
532 
534  : options(options), type(type) {
536  options.stateInitializers)
537  fn(*this);
538 }
539 
541  // Do not copy if the tensor has undefined contents.
542  if (hasUndefinedContents(&opOperand))
543  return true;
544 
545  // Do not copy if the buffer of the tensor is entirely overwritten (with
546  // values that do not depend on the old tensor).
547  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
548  return true;
549 
550  // Do not copy if the tensor is never read.
551  AliasingOpResultList aliases = getAliasingOpResults(opOperand);
552  if (!bufferizesToMemoryRead(opOperand) &&
553  llvm::none_of(
554  aliases, [&](AliasingOpResult a) { return isValueRead(a.opResult); }))
555  return true;
556 
557  // Default: Cannot omit the copy.
558  return false;
559 }
560 
561 bool AnalysisState::isInPlace(OpOperand &opOperand) const {
562  // ToMemrefOps are always in-place.
563  if (isa<ToMemrefOp>(opOperand.getOwner()))
564  return true;
565 
566  // In the absence of analysis information, OpOperands that bufferize to a
567  // memory write are out-of-place, i.e., an alloc and copy is inserted.
568  return !bufferizesToMemoryWrite(opOperand);
569 }
570 
572  // In the absence of analysis information, we do not know if the values are
573  // equivalent. The conservative answer is "false".
574  return false;
575 }
576 
578  // In the absence of analysis information, we do not know if the values may be
579  // aliasing. The conservative answer is "true".
580  return true;
581 }
582 
584  // In the absence of analysis information, the conservative answer is "false".
585  return false;
586 }
587 
589  // In the absence of analysis information, the conservative answer is "true".
590  if (!tensor.getDefiningOp<AllocTensorOp>())
591  return true;
592 
593  // For AllocTensorOp results, we can do better: They do not alias with any
594  // preceding value, so we can follow SSA use-def chains and do a simple
595  // analysis.
596  SmallVector<OpOperand *> worklist;
597  for (OpOperand &use : tensor.getUses())
598  worklist.push_back(&use);
599 
600  while (!worklist.empty()) {
601  OpOperand *operand = worklist.pop_back_val();
602  Operation *op = operand->getOwner();
603 
604  // If the op is not bufferizable, we can safely assume that the value is not
605  // yielded. (When bufferizing that op, it must handle such cases.)
606  if (!options.dynCastBufferizableOp(op))
607  continue;
608 
609  // We cannot analyze through ToMemrefOps, so we have to conservatively
610  // assume that the value is yielded.
611  if (isa<ToMemrefOp>(op))
612  return true;
613 
614  // Check if the op is returning/yielding.
615  if (isRegionReturnLike(op))
616  return true;
617 
618  // Add all aliasing OpResults to the worklist.
619  // Note: In the absence of detailed analysis information (e.g., there may be
620  // no function call analysis information), this `getAliasingOpResult` is
621  // conservative and may report additional OpResults as potentially aliasing.
622  for (AliasingOpResult alias : getAliasingOpResults(*operand))
623  for (OpOperand &use : alias.opResult.getUses())
624  worklist.push_back(&use);
625  }
626 
627  // No ReturnLike op found: The value is not yielded.
628  return false;
629 }
630 
631 // bufferization.to_memref is not allowed to change the rank.
632 static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
633 #ifndef NDEBUG
634  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
635  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
636  rankedTensorType.getRank()) &&
637  "to_memref would be invalid: mismatching ranks");
638 #endif
639 }
640 
642  const BufferizationOptions &options) {
643 #ifndef NDEBUG
644  auto tensorType = value.getType().dyn_cast<TensorType>();
645  assert(tensorType && "unexpected non-tensor type");
646 #endif // NDEBUG
647 
648  // Replace "%t = to_tensor %m" with %m.
649  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
650  return toTensorOp.getMemref();
651 
652  // Insert to_memref op.
653  OpBuilder::InsertionGuard g(rewriter);
654  setInsertionPointAfter(rewriter, value);
655  FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
656  if (failed(memrefType))
657  return failure();
658  ensureToMemrefOpIsValid(value, *memrefType);
659  return rewriter
660  .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
661  .getResult();
662 }
663 
664 /// Return the buffer type for a given Value (tensor) after bufferization.
668  return getBufferType(value, options, fixedTypes);
669 }
670 
671 /// Return the buffer type for a given Value (tensor) after bufferization.
673  Value value, const BufferizationOptions &options,
674  const DenseMap<Value, BaseMemRefType> &fixedTypes) {
675  assert(value.getType().isa<TensorType>() && "unexpected non-tensor type");
676 
677  // If the `value` is in `fixedTypes`, return the mapped type.
678  const auto &it = fixedTypes.find(value);
679  if (it != fixedTypes.end())
680  return it->second;
681 
682  // Try querying BufferizableOpInterface.
683  Operation *op = getOwnerOfValue(value);
684  auto bufferizableOp = options.dynCastBufferizableOp(op);
685  if (bufferizableOp)
686  return bufferizableOp.getBufferType(value, options, fixedTypes);
687 
688  // Op is not bufferizable.
689  if (!options.defaultMemorySpace.has_value())
690  return op->emitError("could not infer memory space");
691 
692  return getMemRefType(value, options, /*layout=*/{},
693  *options.defaultMemorySpace);
694 }
695 
697  Operation *op,
698  ValueRange values) {
699  assert(values.size() == op->getNumResults() &&
700  "expected one value per OpResult");
701  OpBuilder::InsertionGuard g(rewriter);
702 
703  // Replace all OpResults with the given values.
704  SmallVector<Value> replacements;
705  for (OpResult opResult : op->getOpResults()) {
706  Value replacement = values[opResult.getResultNumber()];
707  if (opResult.getType().isa<TensorType>()) {
708  // The OpResult is a tensor. Such values are replaced with memrefs during
709  // bufferization.
710  assert((replacement.getType().isa<MemRefType>() ||
711  replacement.getType().isa<UnrankedMemRefType>()) &&
712  "tensor op result should be replaced with a memref value");
713  // The existing uses of the OpResult still expect a tensor. Insert a
714  // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
715  // lose all of its users and eventually DCE away.
716  rewriter.setInsertionPointAfter(op);
717  replacement = rewriter.create<bufferization::ToTensorOp>(
718  replacement.getLoc(), replacement);
719  }
720  replacements.push_back(replacement);
721  }
722 
723  rewriter.replaceOp(op, replacements);
724 }
725 
726 //===----------------------------------------------------------------------===//
727 // Bufferization-specific scoped alloc/dealloc insertion support.
728 //===----------------------------------------------------------------------===//
729 
730 /// Create a memref allocation with the given type and dynamic extents.
732  MemRefType type,
733  ValueRange dynShape) const {
734  if (allocationFn)
735  return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);
736 
737  // Default buffer allocation via AllocOp.
738  if (bufferAlignment != 0)
739  return b
740  .create<memref::AllocOp>(loc, type, dynShape,
742  .getResult();
743  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
744 }
745 
746 /// Creates a memref deallocation. The given memref buffer must have been
747 /// allocated using `createAlloc`.
749  Value allocatedBuffer) const {
750  if (deallocationFn)
751  return (*deallocationFn)(b, loc, allocatedBuffer);
752 
753  // Default buffer deallocation via DeallocOp.
754  b.create<memref::DeallocOp>(loc, allocatedBuffer);
755  return success();
756 }
757 
758 /// Create a memory copy between two memref buffers.
760  Value from, Value to) const {
761  if (memCpyFn)
762  return (*memCpyFn)(b, loc, from, to);
763 
764  b.create<memref::CopyOp>(loc, from, to);
765  return success();
766 }
767 
768 //===----------------------------------------------------------------------===//
769 // Bufferization-specific IRMapping support with debugging.
770 //===----------------------------------------------------------------------===//
771 
773  auto bbArg = value.dyn_cast<BlockArgument>();
774  if (!bbArg)
775  return false;
776  return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
777 }
778 
781  MemRefLayoutAttrInterface layout,
782  Attribute memorySpace) {
783  auto tensorType = value.getType().cast<TensorType>();
784 
785  // Case 1: Unranked memref type.
786  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
787  assert(!layout && "UnrankedTensorType cannot have a layout map");
788  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
789  memorySpace);
790  }
791 
792  // Case 2: Ranked memref type with specified layout.
793  auto rankedTensorType = tensorType.cast<RankedTensorType>();
794  if (layout) {
795  return MemRefType::get(rankedTensorType.getShape(),
796  rankedTensorType.getElementType(), layout,
797  memorySpace);
798  }
799 
800  return options.unknownTypeConverterFn(value, memorySpace, options);
801 }
802 
805  Attribute memorySpace) {
806  // Case 1: Unranked memref type.
807  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
808  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
809  memorySpace);
810  }
811 
812  // Case 2: Ranked memref type.
813  auto rankedTensorType = tensorType.cast<RankedTensorType>();
814  int64_t dynamicOffset = ShapedType::kDynamic;
815  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
816  ShapedType::kDynamic);
817  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
818  dynamicOffset, dynamicStrides);
819  return MemRefType::get(rankedTensorType.getShape(),
820  rankedTensorType.getElementType(), stridedLayout,
821  memorySpace);
822 }
823 
824 /// Return a MemRef type with a static identity layout (i.e., no layout map). If
825 /// the given tensor type is unranked, return an unranked MemRef type.
828  Attribute memorySpace) {
829  // Case 1: Unranked memref type.
830  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
831  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
832  memorySpace);
833  }
834 
835  // Case 2: Ranked memref type.
836  auto rankedTensorType = tensorType.cast<RankedTensorType>();
837  MemRefLayoutAttrInterface layout = {};
838  return MemRefType::get(rankedTensorType.getShape(),
839  rankedTensorType.getElementType(), layout,
840  memorySpace);
841 }
842 
843 //===----------------------------------------------------------------------===//
844 // Default implementations of interface methods
845 //===----------------------------------------------------------------------===//
846 
848  OpResult opResult, const AnalysisState &state) {
849  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
850  AliasingOpOperandList opOperands =
851  bufferizableOp.getAliasingOpOperands(opResult, state);
852 
853  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
854  // memory writes.
855  if (opOperands.getAliases().empty())
856  return true;
857 
858  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
859  // may bufferize to a memory write.
860  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
861  return state.bufferizesToMemoryWrite(*alias.opOperand);
862  }))
863  return true;
864 
865  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
866  // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
867  // case, the OpResult bufferizes to a memory write. E.g.:
868  //
869  // %0 = "some_writing_op" : tensor<?xf32>
870  // %r = scf.if ... -> tensor<?xf32> {
871  // scf.yield %0 : tensor<?xf32>
872  // } else {
873  // %1 = "another_writing_op"(%0) : tensor<?xf32>
874  // scf.yield %1 : tensor<?xf32>
875  // }
876  // "some_reading_op"(%r)
877  //
878  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
879  // bufferizes to a memory write and the defining op is inside the scf.if.
880  //
881  // Note: This treatment of surrounding ops is useful for ops that have a
882  // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
883  // the analysis considerably.
884  //
885  // "another_writing_op" in the above example should be able to bufferize
886  // inplace in the absence of another read of %0. However, if the scf.if op
887  // would not be considered a "write", the analysis would detect the
888  // following conflict:
889  //
890  // * read = some_reading_op
891  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
892  // * conflictingWrite = %1
893  //
894  auto isMemoryWriteInsideOp = [&](Value v) {
895  Operation *op = getOwnerOfValue(v);
896  if (!opResult.getDefiningOp()->isAncestor(op))
897  return false;
898  return state.bufferizesToMemoryWrite(v);
899  };
900  for (AliasingOpOperand alias : opOperands) {
901  if (!state
902  .findValueInReverseUseDefChain(alias.opOperand->get(),
903  isMemoryWriteInsideOp,
904  /*followEquivalentOnly=*/false,
905  /*alwaysIncludeLeaves=*/false)
906  .empty())
907  return true;
908  }
909  return false;
910 }
911 
912 // Compute the AliasingOpOperandList for a given OpResult based on
913 // getAliasingOpResults.
915  OpResult opResult, const AnalysisState &state) {
916  Operation *op = opResult.getDefiningOp();
918  for (OpOperand &opOperand : op->getOpOperands()) {
919  if (!opOperand.get().getType().isa<TensorType>())
920  continue;
921  AliasingOpResultList aliasingOpResults =
922  state.getAliasingOpResults(opOperand);
923  for (const auto &it : aliasingOpResults)
924  if (it.opResult == opResult)
925  result.emplace_back(&opOperand, it.relation, it.isDefinite);
926  }
927  return AliasingOpOperandList(std::move(result));
928 }
929 
931  Value value, const BufferizationOptions &options,
932  const DenseMap<Value, BaseMemRefType> &fixedTypes) {
933  assert(value.getType().isa<TensorType>() && "expected tensor type");
934 
935  // No further analysis is possible for a block argument.
936  if (value.isa<BlockArgument>())
937  return bufferization::getMemRefType(value, options);
938 
939  // Value is an OpResult.
940  Operation *op = getOwnerOfValue(value);
941  auto opResult = value.cast<OpResult>();
942  AnalysisState state(options);
943  AliasingOpOperandList aliases = state.getAliasingOpOperands(opResult);
944  if (aliases.getNumAliases() > 0 &&
945  aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
946  // If the OpResult has an equivalent OpOperand, both OpResult and
947  // OpOperand bufferize to the exact same buffer type.
948  Value equivalentOperand = aliases.getAliases().front().opOperand->get();
949  return getBufferType(equivalentOperand, options, fixedTypes);
950  }
951 
952  // If we do not know the memory space and there is no default memory space,
953  // report a failure.
954  if (!options.defaultMemorySpace.has_value())
955  return op->emitError("could not infer memory space");
956 
957  return getMemRefType(value, options, /*layout=*/{},
958  *options.defaultMemorySpace);
959 }
960 
962  BufferizableOpInterface bufferizableOp, unsigned index) {
963  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
964  auto regionInterface =
965  dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
966  if (!regionInterface)
967  return false;
968  return regionInterface.isRepetitiveRegion(index);
969 }
970 
973  // Conservatively assume that everything may be aliasing.
975  for (OpOperand &operand : opResult.getDefiningOp()->getOpOperands())
976  if (operand.get().getType().isa<TensorType>())
977  r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
978  return r;
979 }
980 
983  // Conservatively assume that everything may be aliasing.
985  for (OpResult result : opOperand.getOwner()->getOpResults())
986  if (result.getType().isa<TensorType>())
987  r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
988  return r;
989 }
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType)
static void setInsertionPointAfter(OpBuilder &b, Value value)
static BaseMemRefType defaultUnknownTypeConverter(Value value, Attribute memorySpace, const BufferizationOptions &options)
Default unknown type converter: Use a fully dynamic layout map.
static bool isRepetitiveRegion(Region *region, const BufferizationOptions &options)
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static llvm::ManagedStatic< PassManagerOptions > options
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
Definition: TypeID.h:263
Base class for generic analysis states.
Attributes are known-constant values of operations.
Definition: Attributes.h:25
U cast() const
Definition: Attributes.h:176
This class provides a shared interface for ranked and unranked memref types.
Definition: BuiltinTypes.h:116
This class represents an argument of a Block.
Definition: Value.h:304
Block represents an ordered list of Operations.
Definition: Block.h:30
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
Definition: Block.cpp:26
Special case of IntegerAttr to represent boolean integers, i.e., signless i1 integers.
IntegerAttr getI64IntegerAttr(int64_t value)
Definition: Builders.cpp:125
ArrayAttr getBoolArrayAttr(ArrayRef< bool > values)
Definition: Builders.cpp:263
This class provides support for representing a failure result, or a valid value of type T.
Definition: LogicalResult.h:78
IRValueT get() const
Return the current value being used by this operand.
Definition: UseDefLists.h:137
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:63
RAII guard to reset the insertion point of the builder when destroyed.
Definition: Builders.h:329
This class helps build Operations.
Definition: Builders.h:202
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:412
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
Definition: Builders.h:379
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:432
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent inser...
Definition: Builders.h:393
This class represents an operand of an operation.
Definition: Value.h:255
This is a value defined by a result of an operation.
Definition: Value.h:442
Operation * getOwner() const
Returns the operation that owns this result.
Definition: Value.h:451
unsigned getResultNumber() const
Returns the number of this result.
Definition: Value.h:454
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:75
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loa...
Definition: Operation.h:204
AttrClass getAttrOfType(StringAttr name)
Definition: Operation.h:437
Attribute getAttr(StringAttr name)
Return the specified attribute if present, null otherwise.
Definition: Operation.h:433
bool hasAttr(StringAttr name)
Return true if the operation has an attribute with the provided name, false otherwise.
Definition: Operation.h:447
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:207
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Definition: Operation.cpp:234
Block * getBlock()
Returns the operation block that contains this operation.
Definition: Operation.h:197
void setAttr(StringAttr name, Attribute value)
If an attribute exists with the specified name, change it to the new value.
Definition: Operation.h:457
MutableArrayRef< OpOperand > getOpOperands()
Definition: Operation.h:362
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
Definition: Operation.h:247
result_range getOpResults()
Definition: Operation.h:399
Region * getParentRegion()
Returns the region to which the instruction belongs.
Definition: Operation.h:214
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:383
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition: Region.h:26
Region * getParentRegion()
Return the region containing this region or nullptr if the region is attached to a top-level operatio...
Definition: Region.cpp:45
unsigned getRegionNumber()
Return the number of this region in the parent operation.
Definition: Region.cpp:62
Operation * getParentOp()
Return the parent operation this region is attached to.
Definition: Region.h:200
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
Definition: PatternMatch.h:399
virtual void replaceOp(Operation *op, ValueRange newValues)
This method replaces the results of the operation with the specified list of values.
void updateRootInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around a root update of an operation.
Definition: PatternMatch.h:549
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
Definition: BuiltinTypes.h:80
This class provides an efficient unique identifier for a specific C++ type.
Definition: TypeID.h:104
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
U cast() const
Definition: Types.h:321
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
Definition: Types.cpp:35
U dyn_cast() const
Definition: Types.h:311
bool isa() const
Definition: Types.h:301
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:370
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:93
bool isa() const
Definition: Value.h:98
Type getType() const
Return the type of this value.
Definition: Value.h:122
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Definition: Value.h:201
U dyn_cast() const
Definition: Value.h:103
U cast() const
Definition: Value.h:113
Location getLoc() const
Return the location of this value.
Definition: Value.cpp:26
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:20
Region * getParentRegion()
Return the Region in which this Value is defined.
Definition: Value.cpp:41
AnalysisState provides a variety of helper functions for dealing with tensor values.
bool isValueRead(Value value) const
Return true if the given value is read by an op that bufferizes to a memory read.
AliasingOpOperandList getAliasingOpOperands(OpResult result) const
Determine which OpOperand* will alias with result if the op is bufferized in place.
virtual bool areAliasingBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 may bufferize to aliasing buffers.
virtual bool hasUndefinedContents(OpOperand *opOperand) const
Return true if the given tensor has undefined contents.
bool canOmitTensorCopy(OpOperand &opOperand) const
Return true if a copy can always be avoided when allocating a new tensor for the given OpOperand.
bool bufferizesToMemoryWrite(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory write.
virtual bool isInPlace(OpOperand &opOperand) const
Return true if the given OpOperand has been decided to bufferize inplace.
bool bufferizesToAliasOnly(OpOperand &opOperand) const
Return true if opOperand does neither read nor write but bufferizes to an alias.
SetVector< Value > findValueInReverseUseDefChain(Value value, llvm::function_ref< bool(Value)> condition, bool followEquivalentOnly=false, bool alwaysIncludeLeaves=true) const
Starting from value, follow the use-def chain in reverse, always selecting the aliasing OpOperands.
AnalysisState(const BufferizationOptions &options)
const BufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
virtual bool isTensorYielded(Value tensor) const
Return true if the given tensor (or an aliasing tensor) is yielded from the containing block.
bool bufferizesToMemoryRead(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory read.
AliasingOpResultList getAliasingOpResults(OpOperand &opOperand) const
Determine which OpResult will alias with opOperand if the op is bufferized in place.
SetVector< Value > findDefinitions(Value value) const
Find the values that may define the contents of the given value at runtime.
virtual bool areEquivalentBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 bufferize to equivalent buffers.
bool isOpAllowed(Operation *op) const
Return whether the op is allowed or not.
Operation * getOwner() const
Return the owner of this operand.
Definition: UseDefLists.h:40
FailureOr< BaseMemRefType > defaultGetBufferType(Value value, const BufferizationOptions &options, const DenseMap< Value, BaseMemRefType > &fixedTypes)
This is the default implementation of BufferizableOpInterface::getBufferType.
AliasingOpOperandList defaultGetAliasingOpOperands(OpResult opResult, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::getAliasingOpOperands.
bool defaultResultBufferizesToMemoryWrite(OpResult opResult, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::resultBufferizesToMemoryWrite.
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp, unsigned index)
This is the default implementation of BufferizableOpInterface::isRepetitiveRegion.
AliasingOpResultList unknownGetAliasingOpResults(OpOperand &opOperand)
This is the default implementation of getAliasingOpResults in case the owner op does not implement th...
AliasingOpOperandList unknownGetAliasingOpOperands(OpResult opResult)
This is the default implementation of getAliasingOpOperands in case the defining op does not implemen...
void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, ValueRange values)
Replace an op with replacement values.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
Operation * getOwnerOfValue(Value value)
Return the owner of the given value.
bool allocationDoesNotEscape(OpResult opResult)
Return true if the allocation of the given op is guaranteed to not escape the containing block.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, MemRefLayoutAttrInterface layout={}, Attribute memorySpace=nullptr)
Return a MemRefType to which the type of the given value can be bufferized.
bool shouldDeallocateOpResult(OpResult opResult, const BufferizationOptions &options)
Return true if the buffer of given OpResult should be deallocated.
Region * getEnclosingRepetitiveRegion(Operation *op, const BufferizationOptions &options)
Return the closest enclosing repetitive region around the given op.
FailureOr< Value > allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, bool escape, const BufferizationOptions &options, bool copy=true)
Create an AllocTensorOp for the given shaped value (memref or tensor).
Region * getNextEnclosingRepetitiveRegion(Region *region, const BufferizationOptions &options)
Assuming that the given region is repetitive, find the next enclosing repetitive region.
AliasList< AliasingOpOperand > AliasingOpOperandList
A list of possible aliasing OpOperands.
bool isFunctionArgument(Value value)
Return true if the given value is a BlockArgument of a func::FuncOp.
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
FailureOr< Value > getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options)
Lookup the buffer for the given value.
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the giv...
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:223
This header declares functions that assist transformations in the MemRef dialect.
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:62
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
bool succeeded(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a success value.
Definition: LogicalResult.h:68
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:56
bool isRegionReturnLike(Operation *operation)
Returns true if the given operation is either annotated with the ReturnLike trait or implements the R...
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value.
Definition: LogicalResult.h:72
This class represents an efficient way to signal success or failure.
Definition: LogicalResult.h:26
Options for BufferizableOpInterface-based bufferization.
std::function< void(AnalysisState &)> AnalysisStateInitFn
Initializer function for analysis state.
BufferizableOpInterface dynCastBufferizableOp(Operation *op) const
Try to cast the given op to BufferizableOpInterface if the op is allow listed.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
std::optional< AllocationFn > allocationFn
Helper functions for allocation, deallocation, memory copying.
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
bool isOpAllowed(Operation *op) const
Return true if the given op should be bufferized.
std::optional< DeallocationFn > deallocationFn
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
LogicalResult createDealloc(OpBuilder &b, Location loc, Value allocatedBuffer) const
Creates a memref deallocation.
FailureOr< Value > createAlloc(OpBuilder &b, Location loc, MemRefType type, ValueRange dynShape) const
Create a memref allocation with the given type and dynamic extents.
LogicalResult createMemCpy(OpBuilder &b, Location loc, Value from, Value to) const
Creates a memcpy between two given buffers.
SmallVector< AnalysisStateInitFn > stateInitializers
Initializer functions for analysis state.