MLIR  20.0.0git
BufferizableOpInterface.cpp
Go to the documentation of this file.
1 //===- BufferizableOpInterface.cpp - Bufferizable Ops ---=----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
14 #include "mlir/IR/AsmState.h"
15 #include "mlir/IR/BuiltinOps.h"
16 #include "mlir/IR/IRMapping.h"
17 #include "mlir/IR/Operation.h"
18 #include "mlir/IR/TypeUtilities.h"
19 #include "mlir/IR/Value.h"
21 #include "llvm/ADT/ScopeExit.h"
22 #include "llvm/Support/Debug.h"
23 
24 //===----------------------------------------------------------------------===//
25 // BufferizableOpInterface
26 //===----------------------------------------------------------------------===//
27 
28 namespace mlir {
29 namespace bufferization {
30 
31 #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
32 
33 } // namespace bufferization
34 } // namespace mlir
35 
37 
38 #define DEBUG_TYPE "bufferizable-op-interface"
39 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
40 #define LDBG(X) LLVM_DEBUG(DBGS() << (X))
41 
42 using namespace mlir;
43 using namespace bufferization;
44 
45 static bool isRepetitiveRegion(Region *region,
47  Operation *op = region->getParentOp();
48  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
49  if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
50  return true;
51  return false;
52 }
53 
56  if (!op->getBlock())
57  return nullptr;
58  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
59  iter != enclosingRepetitiveRegionCache.end())
60  return iter->second;
61  return enclosingRepetitiveRegionCache[op] =
62  getEnclosingRepetitiveRegion(op->getBlock(), options);
63 }
64 
66  Value value, const BufferizationOptions &options) {
67  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
68  iter != enclosingRepetitiveRegionCache.end())
69  return iter->second;
70 
71  Region *region = value.getParentRegion();
72  // Collect all visited regions since we only know the repetitive region we
73  // want to map it to later on
74  SmallVector<Region *> visitedRegions;
75  while (region) {
76  visitedRegions.push_back(region);
77  if (isRepetitiveRegion(region, options))
78  break;
79  region = region->getParentRegion();
80  }
81  enclosingRepetitiveRegionCache[value] = region;
82  for (Region *r : visitedRegions)
83  enclosingRepetitiveRegionCache[r] = region;
84  return region;
85 }
86 
88  Block *block, const BufferizationOptions &options) {
89  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
90  iter != enclosingRepetitiveRegionCache.end())
91  return iter->second;
92 
93  Region *region = block->getParent();
94  Operation *op = nullptr;
95  // Collect all visited regions since we only know the repetitive region we
96  // want to map it to later on
97  SmallVector<Region *> visitedRegions;
98  do {
99  op = region->getParentOp();
100  if (isRepetitiveRegion(region, options))
101  break;
102  } while ((region = op->getParentRegion()));
103 
104  enclosingRepetitiveRegionCache[block] = region;
105  for (Region *r : visitedRegions)
106  enclosingRepetitiveRegionCache[r] = region;
107  return region;
108 }
109 
110 void AnalysisState::resetCache() { enclosingRepetitiveRegionCache.clear(); }
111 
113  Region *region, const BufferizationOptions &options) {
114  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
115  while ((region = region->getParentRegion())) {
116  if (isRepetitiveRegion(region, options))
117  break;
118  }
119  return region;
120 }
121 
123  const BufferizationOptions &options) {
124  while (region) {
125  auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
126  if (bufferizableOp &&
127  bufferizableOp.isParallelRegion(region->getRegionNumber())) {
128  assert(isRepetitiveRegion(region, options) &&
129  "expected that all parallel regions are also repetitive regions");
130  return region;
131  }
132  region = region->getParentRegion();
133  }
134  return nullptr;
135 }
136 
138  if (auto opResult = llvm::dyn_cast<OpResult>(value))
139  return opResult.getDefiningOp();
140  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
141 }
142 
143 /// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
144 /// shaped value is copied. Otherwise, a tensor with undefined contents is
145 /// allocated.
147  OpBuilder &b, Location loc, Value shapedValue,
148  const BufferizationOptions &options, bool copy) {
149  Value tensor;
150  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
151  tensor = shapedValue;
152  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
153  tensor = b.create<ToTensorOp>(loc, shapedValue);
154  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
155  llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
156  return getOwnerOfValue(shapedValue)
157  ->emitError("copying of unranked tensors is not implemented");
158  } else {
159  llvm_unreachable("expected RankedTensorType or MemRefType");
160  }
161  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
162  SmallVector<Value> dynamicSizes;
163  if (!copy) {
164  // Compute the dynamic part of the shape.
165  // First try to query the shape via ReifyRankedShapedTypeOpInterface.
166  bool reifiedShapes = false;
167  if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
168  llvm::isa<OpResult>(shapedValue)) {
169  ReifiedRankedShapedTypeDims resultDims;
170  if (succeeded(
171  reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
172  reifiedShapes = true;
173  auto &shape =
174  resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
175  for (const auto &dim : enumerate(tensorType.getShape()))
176  if (ShapedType::isDynamic(dim.value()))
177  dynamicSizes.push_back(shape[dim.index()].get<Value>());
178  }
179  }
180 
181  // If the shape could not be reified, create DimOps.
182  if (!reifiedShapes)
183  populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
184  }
185 
186  // Create AllocTensorOp.
187  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
188  copy ? tensor : Value());
189 
190  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
191  if (copy)
192  return allocTensorOp.getResult();
193  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
194  if (failed(copyBufferType))
195  return failure();
196  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
197  if (!memorySpace)
198  memorySpace = options.defaultMemorySpaceFn(tensorType);
199  if (memorySpace.has_value())
200  allocTensorOp.setMemorySpaceAttr(memorySpace.value());
201  return allocTensorOp.getResult();
202 }
203 
204 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
205  RewriterBase &rewriter, const AnalysisState &state) {
206  OpBuilder::InsertionGuard g(rewriter);
207  Operation *op = getOperation();
208  SmallVector<OpOperand *> outOfPlaceOpOperands;
209  DenseSet<OpOperand *> copiedOpOperands;
210  SmallVector<Value> outOfPlaceValues;
211  DenseSet<Value> copiedOpValues;
212 
213  // Find all out-of-place OpOperands.
214  for (OpOperand &opOperand : op->getOpOperands()) {
215  Type operandType = opOperand.get().getType();
216  if (!llvm::isa<TensorType>(operandType))
217  continue;
218  if (state.isInPlace(opOperand))
219  continue;
220  if (llvm::isa<UnrankedTensorType>(operandType))
221  return op->emitError("copying of unranked tensors is not implemented");
222 
223  AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
224  if (aliasingValues.getNumAliases() == 1 &&
225  isa<OpResult>(aliasingValues.getAliases()[0].value) &&
226  !state.bufferizesToMemoryWrite(opOperand) &&
227  state.getAliasingOpOperands(aliasingValues.getAliases()[0].value)
228  .getNumAliases() == 1 &&
229  !isa<UnrankedTensorType>(
230  aliasingValues.getAliases()[0].value.getType())) {
231  // The op itself does not write but may create exactly one alias. Instead
232  // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
233  // be smaller than the OpOperand (e.g., in the case of an extract_slice,
234  // where the result is usually a smaller part of the source). Do not apply
235  // this optimization if the OpResult is an unranked tensor (because those
236  // cannot be copied at the moment).
237  Value value = aliasingValues.getAliases()[0].value;
238  outOfPlaceValues.push_back(value);
239  if (!state.canOmitTensorCopy(opOperand))
240  copiedOpValues.insert(value);
241  } else {
242  // In all other cases, make a copy of the OpOperand.
243  outOfPlaceOpOperands.push_back(&opOperand);
244  if (!state.canOmitTensorCopy(opOperand))
245  copiedOpOperands.insert(&opOperand);
246  }
247  }
248 
249  // Insert copies of OpOperands.
250  rewriter.setInsertionPoint(op);
251  for (OpOperand *opOperand : outOfPlaceOpOperands) {
252  FailureOr<Value> copy = allocateTensorForShapedValue(
253  rewriter, op->getLoc(), opOperand->get(), state.getOptions(),
254  copiedOpOperands.contains(opOperand));
255  if (failed(copy))
256  return failure();
257  rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
258  }
259 
260  // Insert copies of Values.
261  rewriter.setInsertionPointAfter(op);
262  for (Value value : outOfPlaceValues) {
263  FailureOr<Value> copy = allocateTensorForShapedValue(
264  rewriter, op->getLoc(), value, state.getOptions(),
265  copiedOpValues.count(value));
266  if (failed(copy))
267  return failure();
268  SmallVector<OpOperand *> uses = llvm::to_vector(
269  llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
270  for (OpOperand *use : uses) {
271  // Do not update the alloc_tensor op that we just created.
272  if (use->getOwner() == copy->getDefiningOp())
273  continue;
274  // tensor.dim ops may have been created to be used as alloc_tensor op
275  // dynamic extents. Do not update these either.
276  if (isa<tensor::DimOp>(use->getOwner()))
277  continue;
278  rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
279  }
280  }
281 
282  return success();
283 }
284 
285 //===----------------------------------------------------------------------===//
286 // OpFilter
287 //===----------------------------------------------------------------------===//
288 
290  // All other ops: Allow/disallow according to filter.
291  bool isAllowed = !hasAllowRule();
292  for (const Entry &entry : entries) {
293  bool filterResult = entry.fn(op);
294  switch (entry.type) {
295  case Entry::ALLOW:
296  isAllowed |= filterResult;
297  break;
298  case Entry::DENY:
299  if (filterResult)
300  // DENY filter matches. This op is no allowed. (Even if other ALLOW
301  // filters may match.)
302  return false;
303  };
304  }
305  return isAllowed;
306 }
307 
308 //===----------------------------------------------------------------------===//
309 // BufferizationOptions
310 //===----------------------------------------------------------------------===//
311 
312 namespace {
313 
314 /// Default function arg type converter: Use a fully dynamic layout map.
316 defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace,
317  func::FuncOp funcOp,
318  const BufferizationOptions &options) {
319  return getMemRefTypeWithFullyDynamicLayout(type, memorySpace);
320 }
321 /// Default unknown type converter: Use a fully dynamic layout map.
323 defaultUnknownTypeConverter(Value value, Attribute memorySpace,
324  const BufferizationOptions &options) {
326  llvm::cast<TensorType>(value.getType()), memorySpace);
327 }
328 
329 } // namespace
330 
331 // Default constructor for BufferizationOptions.
333  : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
334  unknownTypeConverterFn(defaultUnknownTypeConverter) {}
335 
337  // Special case: If function boundary bufferization is deactivated, do not
338  // allow ops that belong to the `func` dialect.
339  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
340  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
341  return false;
342 
343  return opFilter.isOpAllowed(op);
344 }
345 
346 BufferizableOpInterface
348  if (!isOpAllowed(op))
349  return nullptr;
350  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
351  if (!bufferizableOp)
352  return nullptr;
353  return bufferizableOp;
354 }
355 
356 BufferizableOpInterface
358  return dynCastBufferizableOp(getOwnerOfValue(value));
359 }
360 
362  LayoutMapOption layoutMapOption) {
363  functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace,
364  func::FuncOp funcOp,
365  const BufferizationOptions &options) {
366  if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
368  memorySpace);
370  memorySpace);
371  };
373  layoutMapOption == LayoutMapOption::InferLayoutMap;
374 }
375 
376 //===----------------------------------------------------------------------===//
377 // Helper functions for BufferizableOpInterface
378 //===----------------------------------------------------------------------===//
379 
380 static void setInsertionPointAfter(OpBuilder &b, Value value) {
381  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
382  b.setInsertionPointToStart(bbArg.getOwner());
383  } else {
385  }
386 }
387 
388 /// Determine which OpOperand* will alias with `value` if the op is bufferized
389 /// in place. Return all tensor OpOperand* if the op is not bufferizable.
391  if (Operation *op = getOwnerOfValue(value))
392  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
393  return bufferizableOp.getAliasingOpOperands(value, *this);
394 
395  // The op is not bufferizable.
397 }
398 
399 /// Determine which Values will alias with `opOperand` if the op is bufferized
400 /// in place. Return all tensor Values if the op is not bufferizable.
402  if (auto bufferizableOp =
403  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
404  return bufferizableOp.getAliasingValues(opOperand, *this);
405 
406  // The op is not bufferizable.
407  return detail::unknownGetAliasingValues(opOperand);
408 }
409 
410 /// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
411 /// op is not bufferizable.
413  if (auto bufferizableOp =
414  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
415  return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
416 
417  // Unknown op that returns a tensor. The inplace analysis does not support it.
418  // Conservatively return true.
419  return true;
420 }
421 
422 /// Return true if `opOperand` bufferizes to a memory write. Return
423 /// `true` if the op is not bufferizable.
425  if (auto bufferizableOp =
426  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
427  return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
428 
429  // Unknown op that returns a tensor. The inplace analysis does not support it.
430  // Conservatively return true.
431  return true;
432 }
433 
434 /// Return true if `opOperand` does neither read nor write but bufferizes to an
435 /// alias. Return false if the op is not bufferizable.
437  if (auto bufferizableOp =
438  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
439  return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
440 
441  // Unknown op that returns a tensor. The inplace analysis does not support it.
442  // Conservatively return false.
443  return false;
444 }
445 
447  auto opResult = llvm::dyn_cast<OpResult>(value);
448  if (!opResult)
449  return true;
450  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
451  if (!bufferizableOp)
452  return true;
453  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
454 }
455 
456 /// Return true if the given value is read by an op that bufferizes to a memory
457 /// read. Also takes into account ops that create an alias but do not read by
458 /// themselves (e.g., ExtractSliceOp).
460  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
461  SmallVector<OpOperand *> workingSet;
462  DenseSet<OpOperand *> visited;
463  for (OpOperand &use : value.getUses())
464  workingSet.push_back(&use);
465 
466  while (!workingSet.empty()) {
467  OpOperand *uMaybeReading = workingSet.pop_back_val();
468  if (!visited.insert(uMaybeReading).second)
469  continue;
470 
471  // Skip over all ops that neither read nor write (but create an alias).
472  if (bufferizesToAliasOnly(*uMaybeReading))
473  for (AliasingValue alias : getAliasingValues(*uMaybeReading))
474  for (OpOperand &use : alias.value.getUses())
475  workingSet.push_back(&use);
476  if (bufferizesToMemoryRead(*uMaybeReading))
477  return true;
478  }
479 
480  return false;
481 }
482 
483 // Starting from `value`, follow the use-def chain in reverse, always selecting
484 // the aliasing OpOperands. Find and return Values for which `condition`
485 // evaluates to true. OpOperands of such matching Values are not traversed any
486 // further.
488  Value value, llvm::function_ref<bool(Value)> condition,
489  TraversalConfig config) const {
490  llvm::DenseSet<Value> visited;
491  llvm::SetVector<Value> result, workingSet;
492  workingSet.insert(value);
493 
494  while (!workingSet.empty()) {
495  Value value = workingSet.pop_back_val();
496 
497  if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
498  // Stop traversal if value was already visited.
499  if (config.alwaysIncludeLeaves)
500  result.insert(value);
501  continue;
502  }
503  visited.insert(value);
504 
505  if (condition(value)) {
506  result.insert(value);
507  continue;
508  }
509 
510  if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
511  // Stop iterating if `followUnknownOps` is unset and the op is either
512  // not bufferizable or excluded in the OpFilter.
513  if (config.alwaysIncludeLeaves)
514  result.insert(value);
515  continue;
516  }
517 
519  if (aliases.getNumAliases() == 0) {
520  // The traversal ends naturally if there are no more OpOperands that
521  // could be followed.
522  if (config.alwaysIncludeLeaves)
523  result.insert(value);
524  continue;
525  }
526 
527  for (AliasingOpOperand a : aliases) {
528  if (config.followEquivalentOnly &&
529  a.relation != BufferRelation::Equivalent) {
530  // Stop iterating if `followEquivalentOnly` is set but the alias is not
531  // equivalent.
532  if (config.alwaysIncludeLeaves)
533  result.insert(value);
534  continue;
535  }
536 
537  if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
538  // Stop iterating if `followInPlaceOnly` is set but the alias is
539  // out-of-place.
540  if (config.alwaysIncludeLeaves)
541  result.insert(value);
542  continue;
543  }
544 
545  if (config.followSameTypeOrCastsOnly &&
546  a.opOperand->get().getType() != value.getType() &&
547  !value.getDefiningOp<CastOpInterface>()) {
548  // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias is
549  // has a different type and the op is not a cast.
550  if (config.alwaysIncludeLeaves)
551  result.insert(value);
552  continue;
553  }
554 
555  workingSet.insert(a.opOperand->get());
556  }
557  }
558 
559  return result;
560 }
561 
562 // Find the values that define the contents of the given value.
564  TraversalConfig config;
565  config.alwaysIncludeLeaves = false;
567  value, [&](Value v) { return this->bufferizesToMemoryWrite(v); }, config);
568 }
569 
572 
574  : options(options), type(type) {
576  options.stateInitializers)
577  fn(*this);
578 }
579 
581  // Do not copy if the tensor has undefined contents.
582  if (hasUndefinedContents(&opOperand))
583  return true;
584 
585  // Do not copy if the buffer of the tensor is entirely overwritten (with
586  // values that do not depend on the old tensor).
587  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
588  return true;
589 
590  // Do not copy if the tensor is never read.
591  AliasingValueList aliases = getAliasingValues(opOperand);
592  if (!bufferizesToMemoryRead(opOperand) &&
593  llvm::none_of(aliases,
594  [&](AliasingValue a) { return isValueRead(a.value); }))
595  return true;
596 
597  // Default: Cannot omit the copy.
598  return false;
599 }
600 
601 bool AnalysisState::isInPlace(OpOperand &opOperand) const {
602  // ToMemrefOps are always in-place.
603  if (isa<ToMemrefOp>(opOperand.getOwner()))
604  return true;
605 
606  // In the absence of analysis information, OpOperands that bufferize to a
607  // memory write are out-of-place, i.e., an alloc and copy is inserted.
608  return !bufferizesToMemoryWrite(opOperand);
609 }
610 
612  // In the absence of analysis information, we do not know if the values are
613  // equivalent. The conservative answer is "false".
614  return false;
615 }
616 
618  // In the absence of analysis information, we do not know if the values may be
619  // aliasing. The conservative answer is "true".
620  return true;
621 }
622 
624  // In the absence of analysis information, the conservative answer is "false".
625  return false;
626 }
627 
628 // bufferization.to_memref is not allowed to change the rank.
629 static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
630 #ifndef NDEBUG
631  auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
632  assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
633  rankedTensorType.getRank()) &&
634  "to_memref would be invalid: mismatching ranks");
635 #endif
636 }
637 
638 FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
639  const BufferizationOptions &options) {
640 #ifndef NDEBUG
641  auto tensorType = llvm::dyn_cast<TensorType>(value.getType());
642  assert(tensorType && "unexpected non-tensor type");
643 #endif // NDEBUG
644 
645  // Replace "%t = to_tensor %m" with %m.
646  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
647  return toTensorOp.getMemref();
648 
649  // Insert to_memref op.
650  OpBuilder::InsertionGuard g(rewriter);
651  setInsertionPointAfter(rewriter, value);
652  FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
653  if (failed(memrefType))
654  return failure();
655  ensureToMemrefOpIsValid(value, *memrefType);
656  return rewriter
657  .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
658  .getResult();
659 }
660 
661 /// Return the buffer type for a given Value (tensor) after bufferization.
662 FailureOr<BaseMemRefType>
664  SmallVector<Value> invocationStack;
665  return getBufferType(value, options, invocationStack);
666 }
667 
668 /// Return the buffer type for a given Value (tensor) after bufferization.
669 FailureOr<BaseMemRefType>
671  SmallVector<Value> &invocationStack) {
672  assert(llvm::isa<TensorType>(value.getType()) &&
673  "unexpected non-tensor type");
674  invocationStack.push_back(value);
675  auto popFromStack =
676  llvm::make_scope_exit([&]() { invocationStack.pop_back(); });
677 
678  // Try querying BufferizableOpInterface.
679  Operation *op = getOwnerOfValue(value);
680  auto bufferizableOp = options.dynCastBufferizableOp(op);
681  if (bufferizableOp)
682  return bufferizableOp.getBufferType(value, options, invocationStack);
683 
684  // Op is not bufferizable.
685  auto memSpace =
686  options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
687  if (!memSpace.has_value())
688  return op->emitError("could not infer memory space");
689 
690  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
691 }
692 
694  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
695  return bufferizableOp.hasTensorSemantics();
697 }
698 
700  Operation *op,
701  ValueRange values) {
702  assert(values.size() == op->getNumResults() &&
703  "expected one value per OpResult");
704  OpBuilder::InsertionGuard g(rewriter);
705 
706  // Replace all OpResults with the given values.
707  SmallVector<Value> replacements;
708  for (OpResult opResult : op->getOpResults()) {
709  Value replacement = values[opResult.getResultNumber()];
710  if (llvm::isa<TensorType>(opResult.getType())) {
711  // The OpResult is a tensor. Such values are replaced with memrefs during
712  // bufferization.
713  assert((llvm::isa<MemRefType>(replacement.getType()) ||
714  llvm::isa<UnrankedMemRefType>(replacement.getType())) &&
715  "tensor op result should be replaced with a memref value");
716  // The existing uses of the OpResult still expect a tensor. Insert a
717  // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
718  // loose all of its users and eventually DCE away.
719  rewriter.setInsertionPointAfter(op);
720  replacement = rewriter.create<bufferization::ToTensorOp>(
721  replacement.getLoc(), replacement);
722  }
723  replacements.push_back(replacement);
724  }
725 
726  rewriter.replaceOp(op, replacements);
727 }
728 
729 //===----------------------------------------------------------------------===//
730 // Bufferization-specific scoped alloc insertion support.
731 //===----------------------------------------------------------------------===//
732 
733 /// Create a memref allocation with the given type and dynamic extents.
735  MemRefType type,
736  ValueRange dynShape) const {
737  if (allocationFn)
738  return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);
739 
740  // Default bufferallocation via AllocOp.
741  if (bufferAlignment != 0)
742  return b
743  .create<memref::AllocOp>(loc, type, dynShape,
745  .getResult();
746  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
747 }
748 
749 /// Create a memory copy between two memref buffers.
751  Value from, Value to) const {
752  if (memCpyFn)
753  return (*memCpyFn)(b, loc, from, to);
754 
755  b.create<memref::CopyOp>(loc, from, to);
756  return success();
757 }
758 
759 //===----------------------------------------------------------------------===//
760 // Bufferization-specific IRMapping support with debugging.
761 //===----------------------------------------------------------------------===//
762 
765  MemRefLayoutAttrInterface layout,
766  Attribute memorySpace) {
767  auto tensorType = llvm::cast<TensorType>(value.getType());
768 
769  // Case 1: Unranked memref type.
770  if (auto unrankedTensorType =
771  llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
772  assert(!layout && "UnrankedTensorType cannot have a layout map");
773  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
774  memorySpace);
775  }
776 
777  // Case 2: Ranked memref type with specified layout.
778  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
779  if (layout) {
780  return MemRefType::get(rankedTensorType.getShape(),
781  rankedTensorType.getElementType(), layout,
782  memorySpace);
783  }
784 
785  return options.unknownTypeConverterFn(value, memorySpace, options);
786 }
787 
790  Attribute memorySpace) {
791  // Case 1: Unranked memref type.
792  if (auto unrankedTensorType =
793  llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
794  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
795  memorySpace);
796  }
797 
798  // Case 2: Ranked memref type.
799  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
800  int64_t dynamicOffset = ShapedType::kDynamic;
801  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
802  ShapedType::kDynamic);
803  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
804  dynamicOffset, dynamicStrides);
805  return MemRefType::get(rankedTensorType.getShape(),
806  rankedTensorType.getElementType(), stridedLayout,
807  memorySpace);
808 }
809 
810 /// Return a MemRef type with a static identity layout (i.e., no layout map). If
811 /// the given tensor type is unranked, return an unranked MemRef type.
814  Attribute memorySpace) {
815  // Case 1: Unranked memref type.
816  if (auto unrankedTensorType =
817  llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
818  return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
819  memorySpace);
820  }
821 
822  // Case 2: Ranked memref type.
823  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
824  MemRefLayoutAttrInterface layout = {};
825  return MemRefType::get(rankedTensorType.getShape(),
826  rankedTensorType.getElementType(), layout,
827  memorySpace);
828 }
829 
830 //===----------------------------------------------------------------------===//
831 // Default implementations of interface methods
832 //===----------------------------------------------------------------------===//
833 
835  OpResult opResult, const AnalysisState &state) {
836  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
837  AliasingOpOperandList opOperands =
838  bufferizableOp.getAliasingOpOperands(opResult, state);
839 
840  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
841  // memory writes.
842  if (opOperands.getAliases().empty())
843  return true;
844 
845  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
846  // may bufferize to a memory write.
847  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
848  return state.bufferizesToMemoryWrite(*alias.opOperand);
849  }))
850  return true;
851 
852  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
853  // write. (Or: The reverse SSA use-def chain ends inside the reigon.) In that
854  // case, the OpResult bufferizes to a memory write. E.g.:
855  //
856  // %0 = "some_writing_op" : tensor<?xf32>
857  // %r = scf.if ... -> tensor<?xf32> {
858  // scf.yield %0 : tensor<?xf32>
859  // } else {
860  // %1 = "another_writing_op"(%0) : tensor<?xf32>
861  // scf.yield %1 : tensor<?xf32>
862  // }
863  // "some_reading_op"(%r)
864  //
865  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
866  // bufferizes to a memory write and the defining op is inside the scf.if.
867  //
868  // Note: This treatment of surrouding ops is useful for ops that have a
869  // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
870  // the analysis considerably.
871  //
872  // "another_writing_op" in the above example should be able to bufferize
873  // inplace in the absence of another read of %0. However, if the scf.if op
874  // would not be considered a "write", the analysis would detect the
875  // following conflict:
876  //
877  // * read = some_reading_op
878  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
879  // * conflictingWrite = %1
880  //
881  auto isMemoryWriteInsideOp = [&](Value v) {
882  Operation *op = getOwnerOfValue(v);
883  if (!opResult.getDefiningOp()->isAncestor(op))
884  return false;
885  return state.bufferizesToMemoryWrite(v);
886  };
887  TraversalConfig config;
888  config.alwaysIncludeLeaves = false;
889  for (AliasingOpOperand alias : opOperands) {
890  if (!state
891  .findValueInReverseUseDefChain(alias.opOperand->get(),
892  isMemoryWriteInsideOp, config)
893  .empty())
894  return true;
895  }
896  return false;
897 }
898 
899 // Compute the AliasingOpOperandList for a given Value based on
900 // getAliasingValues.
902  Value value, const AnalysisState &state) {
903  Operation *op = getOwnerOfValue(value);
905  for (OpOperand &opOperand : op->getOpOperands()) {
906  if (!llvm::isa<TensorType>(opOperand.get().getType()))
907  continue;
908  AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
909  for (const auto &it : aliasingValues)
910  if (it.value == value)
911  result.emplace_back(&opOperand, it.relation, it.isDefinite);
912  }
913  return AliasingOpOperandList(std::move(result));
914 }
915 
917  Value value, const BufferizationOptions &options,
918  SmallVector<Value> &invocationStack) {
919  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
920 
921  // No further analysis is possible for a block argument.
922  if (llvm::isa<BlockArgument>(value))
923  return bufferization::getMemRefType(value, options);
924 
925  // Value is an OpResult.
926  Operation *op = getOwnerOfValue(value);
927  auto opResult = llvm::cast<OpResult>(value);
928  AnalysisState state(options);
929  AliasingOpOperandList aliases = state.getAliasingOpOperands(opResult);
930  if (aliases.getNumAliases() > 0 &&
931  aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
932  // If the OpResult has an equivalent OpOperand, both OpResult and
933  // OpOperand bufferize to the exact same buffer type.
934  Value equivalentOperand = aliases.getAliases().front().opOperand->get();
935  return getBufferType(equivalentOperand, options, invocationStack);
936  }
937 
938  // If we do not know the memory space and there is no default memory space,
939  // report a failure.
940  auto memSpace =
941  options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
942  if (!memSpace.has_value())
943  return op->emitError("could not infer memory space");
944 
945  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
946 }
947 
949  BufferizableOpInterface bufferizableOp, unsigned index) {
950  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
951  auto regionInterface =
952  dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
953  if (!regionInterface)
954  return false;
955  return regionInterface.isRepetitiveRegion(index);
956 }
957 
960  // TODO: Take into account successor blocks.
961  // No aliasing in case of non-entry blocks.
962  if (auto bbArg = dyn_cast<BlockArgument>(value))
963  if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
964  return {};
965 
966  // Unknown op: Conservatively assume that each OpResult may alias with every
967  // OpOperand. In addition, each block argument of an entry block may alias
968  // with every OpOperand.
970  for (OpOperand &operand : value.getDefiningOp()->getOpOperands())
971  if (isa<TensorType>(operand.get().getType()))
972  r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
973  return r;
974 }
975 
978  // TODO: Take into account successor blocks.
979  // Unknown op: Conservatively assume that each OpResult may alias with every
980  // OpOperand. In addition, each block argument of an entry block may alias
981  // with every OpOperand.
983  for (OpResult result : opOperand.getOwner()->getOpResults())
984  if (llvm::isa<TensorType>(result.getType()))
985  r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
986  for (Region &region : opOperand.getOwner()->getRegions())
987  if (!region.getBlocks().empty())
988  for (BlockArgument bbArg : region.getBlocks().front().getArguments())
989  if (isa<TensorType>(bbArg.getType()))
990  r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
991  return r;
992 }
993 
995  auto isaTensor = [](Type t) { return isa<TensorType>(t); };
996  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
997  return any_of(r.getBlocks(), [&](Block &b) {
998  return any_of(b.getArguments(), [&](BlockArgument bbArg) {
999  return isaTensor(bbArg.getType());
1000  });
1001  });
1002  });
1003  if (hasTensorBlockArgument)
1004  return true;
1005 
1006  if (any_of(op->getResultTypes(), isaTensor))
1007  return true;
1008  return any_of(op->getOperandTypes(), isaTensor);
1009 }
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType)
static void setInsertionPointAfter(OpBuilder &b, Value value)
static bool isRepetitiveRegion(Region *region, const BufferizationOptions &options)
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static bool isaTensor(Type t)
static llvm::ManagedStatic< PassManagerOptions > options
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
Definition: TypeID.h:263
Base class for generic analysis states.
Attributes are known-constant values of operations.
Definition: Attributes.h:25
This class provides a shared interface for ranked and unranked memref types.
Definition: BuiltinTypes.h:149
This class represents an argument of a Block.
Definition: Value.h:319
Block represents an ordered list of Operations.
Definition: Block.h:31
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
Definition: Block.cpp:26
IntegerAttr getI64IntegerAttr(int64_t value)
Definition: Builders.cpp:152
IRValueT get() const
Return the current value being used by this operand.
Definition: UseDefLists.h:160
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:66
RAII guard to reset the insertion point of the builder when destroyed.
Definition: Builders.h:356
This class helps build Operations.
Definition: Builders.h:215
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:439
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
Definition: Builders.h:406
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:497
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent inser...
Definition: Builders.h:420
This class represents an operand of an operation.
Definition: Value.h:267
This is a value defined by a result of an operation.
Definition: Value.h:457
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loa...
Definition: Operation.h:220
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition: Operation.h:402
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:223
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
Definition: Operation.h:234
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Definition: Operation.cpp:268
Block * getBlock()
Returns the operation block that contains this operation.
Definition: Operation.h:213
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
Definition: Operation.h:672
operand_type_range getOperandTypes()
Definition: Operation.h:392
MutableArrayRef< OpOperand > getOpOperands()
Definition: Operation.h:378
result_type_range getResultTypes()
Definition: Operation.h:423
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
Definition: Operation.h:263
result_range getOpResults()
Definition: Operation.h:415
Region * getParentRegion()
Returns the region to which the instruction belongs.
Definition: Operation.h:230
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:399
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition: Region.h:26
Region * getParentRegion()
Return the region containing this region or nullptr if the region is attached to a top-level operatio...
Definition: Region.cpp:45
unsigned getRegionNumber()
Return the number of this region in the parent operation.
Definition: Region.cpp:62
Operation * getParentOp()
Return the parent operation this region is attached to.
Definition: Region.h:200
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
Definition: PatternMatch.h:400
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
Definition: PatternMatch.h:636
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
Definition: BuiltinTypes.h:102
This class provides an efficient unique identifier for a specific C++ type.
Definition: TypeID.h:104
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
Definition: Types.cpp:35
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:381
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:129
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Definition: Value.h:212
Location getLoc() const
Return the location of this value.
Definition: Value.cpp:26
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:20
Region * getParentRegion()
Return the Region in which this Value is defined.
Definition: Value.cpp:41
AnalysisState provides a variety of helper functions for dealing with tensor values.
bool isValueRead(Value value) const
Return true if the given value is read by an op that bufferizes to a memory read.
AliasingValueList getAliasingValues(OpOperand &opOperand) const
Determine which Value will alias with opOperand if the op is bufferized in place.
virtual bool areAliasingBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 may bufferize to aliasing buffers.
virtual bool hasUndefinedContents(OpOperand *opOperand) const
Return true if the given tensor has undefined contents.
bool canOmitTensorCopy(OpOperand &opOperand) const
Return true if a copy can always be avoided when allocating a new tensor for the given OpOperand.
bool bufferizesToMemoryWrite(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory write.
virtual bool isInPlace(OpOperand &opOperand) const
Return true if the given OpOperand has been decided to bufferize inplace.
bool bufferizesToAliasOnly(OpOperand &opOperand) const
Return true if opOperand does neither read nor write but bufferizes to an alias.
AliasingOpOperandList getAliasingOpOperands(Value value) const
Determine which OpOperand* will alias with value if the op is bufferized in place.
AnalysisState(const BufferizationOptions &options)
Region * getEnclosingRepetitiveRegion(Operation *op, const BufferizationOptions &options)
Return the closest enclosing repetitive region around the given op.
const BufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
bool bufferizesToMemoryRead(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory read.
SetVector< Value > findValueInReverseUseDefChain(Value value, llvm::function_ref< bool(Value)> condition, TraversalConfig config=TraversalConfig()) const
Starting from value, follow the use-def chain in reverse, always selecting the aliasing OpOperands.
SetVector< Value > findDefinitions(Value value) const
Find the values that may define the contents of the given value at runtime.
virtual bool areEquivalentBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 bufferize to equivalent buffers.
bool isOpAllowed(Operation *op) const
Return whether the op is allowed or not.
Operation * getOwner() const
Return the owner of this operand.
Definition: UseDefLists.h:38
AliasingOpOperandList defaultGetAliasingOpOperands(Value value, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::getAliasingOpOperands.
bool defaultResultBufferizesToMemoryWrite(OpResult opResult, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::resultBufferizesToMemoryWrite.
AliasingValueList unknownGetAliasingValues(OpOperand &opOperand)
This is the default implementation of getAliasingValues in case the owner op does not implement the B...
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp, unsigned index)
This is the default implementation of BufferizableOpInterface::isRepetitiveRegion.
AliasingOpOperandList unknownGetAliasingOpOperands(Value value)
This is the default implementation of getAliasingOpOperands in case the defining op does not implemen...
bool defaultHasTensorSemantics(Operation *op)
This is the default implementation of BufferizableOpInterface::hasTensorSemantics.
FailureOr< BaseMemRefType > defaultGetBufferType(Value value, const BufferizationOptions &options, SmallVector< Value > &invocationStack)
This is the default implementation of BufferizableOpInterface::getBufferType.
void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, ValueRange values)
Replace an op with replacement values.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
Operation * getOwnerOfValue(Value value)
Return the owner of the given value.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, MemRefLayoutAttrInterface layout={}, Attribute memorySpace=nullptr)
Return a MemRefType to which the type of the given value can be bufferized.
Region * getParallelRegion(Region *region, const BufferizationOptions &options)
If region is a parallel region, return region.
Region * getNextEnclosingRepetitiveRegion(Region *region, const BufferizationOptions &options)
Assuming that the given region is repetitive, find the next enclosing repetitive region.
AliasList< AliasingOpOperand > AliasingOpOperandList
A list of possible aliasing OpOperands.
FailureOr< Value > allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, const BufferizationOptions &options, bool copy=true)
Create an AllocTensorOp for the given shaped value (memref or tensor).
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
FailureOr< Value > getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options)
Lookup the buffer for the given value.
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the giv...
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:344
Include the generated interface declarations.
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
Options for BufferizableOpInterface-based bufferization.
std::function< void(AnalysisState &)> AnalysisStateInitFn
Initializer function for analysis state.
void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption)
This function controls buffer types on function signatures.
BufferizableOpInterface dynCastBufferizableOp(Operation *op) const
Try to cast the given op to BufferizableOpInterface if the op is allow listed.
bool inferFunctionResultLayout
If true, function result types are inferred from the body of the function.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
FunctionArgTypeConverterFn functionArgTypeConverterFn
Type converter from tensors to memrefs.
std::optional< AllocationFn > allocationFn
Helper functions for allocation and memory copying.
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
bool isOpAllowed(Operation *op) const
Return true if the given op should be bufferized.
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
FailureOr< Value > createAlloc(OpBuilder &b, Location loc, MemRefType type, ValueRange dynShape) const
Create a memref allocation with the given type and dynamic extents.
LogicalResult createMemCpy(OpBuilder &b, Location loc, Value from, Value to) const
Creates a memcpy between two given buffers.
SmallVector< AnalysisStateInitFn > stateInitializers
Initializer functions for analysis state.
Traversal parameters for findValueInReverseUseDefChain.
bool followUnknownOps
Specifies whether unknown/non-bufferizable/ops not included in the OpFilter of BufferizationOptions s...
bool alwaysIncludeLeaves
Specifies if leaves (that do not have further OpOperands to follow) should be returned even if they d...
bool followSameTypeOrCastsOnly
Specifies whether OpOperands with a different type that are not the result of a CastOpInterface op sh...
bool followInPlaceOnly
Specifies whether out-of-place/undecided OpOperands should be followed.
bool followEquivalentOnly
Specifies whether non-equivalent OpOperands should be followed.
bool revisitAlreadyVisitedValues
Specifies whether already visited values should be visited again.