//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Debug.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all visited regions, since the repetitive region that they map to
  // is only known at the end of the walk.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions, since the repetitive region that they map to
  // is only known at the end of the walk.
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

void AnalysisState::resetCache() { enclosingRepetitiveRegionCache.clear(); }

Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}

Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = b.create<ToTensorOp>(loc, shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape()))
          if (ShapedType::isDynamic(dim.value()))
            dynamicSizes.push_back(shape[dim.index()].get<Value>());
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
                                               copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
  if (copy)
    return allocTensorOp.getResult();
  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
  if (failed(copyBufferType))
    return failure();
  Attribute memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = b.getI64IntegerAttr(0);
  allocTensorOp.setMemorySpaceAttr(memorySpace);
  return allocTensorOp.getResult();
}
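
// Illustrative usage sketch (not from the upstream file; `rewriter`, `op`, and
// `v` are hypothetical): a bufferization pattern that needs a temporary copy
// of a shaped value `v` would typically call the helper above as follows.
//
//   FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
//       rewriter, op->getLoc(), v, state.getOptions(), /*copy=*/true);
//   if (failed(tensorAlloc))
//     return failure();
//   Value copyOfV = *tensorAlloc;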

LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &state) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (state.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !state.bufferizesToMemoryWrite(opOperand) &&
        state.getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
      // be smaller than the OpOperand (e.g., in the case of an extract_slice,
      // where the result is usually a smaller part of the source). Do not apply
      // this optimization if the OpResult is an unranked tensor (because those
      // cannot be copied at the moment).
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), state.getOptions(),
        copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, state.getOptions(),
        copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(
        llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
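
// For intuition (illustrative IR, not from the upstream file): if the analysis
// decided that an insertion into %t must be out-of-place, the method above
// rewrites the operand to a fresh allocation that copies %t:
//
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
//   %0 = tensor.insert %f into %copy[%idx] : tensor<5xf32>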

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

bool OpFilter::isOpAllowed(Operation *op) const {
  // All other ops: Allow/disallow according to filter.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // DENY filter matches. This op is not allowed. (Even if other ALLOW
        // filters may match.)
        return false;
    };
  }
  return isAllowed;
}
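
// Illustrative sketch (hypothetical client code, not from this file): entries
// are usually registered through the filter's allow/deny helpers, e.g.:
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>(); // ALLOW rule
//   filter.denyOperation<tensor::GenerateOp>();   // DENY rule wins on a match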

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(type, memorySpace);
}
/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(Value value, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(
      llvm::cast<TensorType>(value.getType()), memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  if (!isOpAllowed(op))
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
      return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                  memorySpace);
    return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                              memorySpace);
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
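
// Illustrative usage (hypothetical, not from this file): a pipeline that wants
// identity-layout memrefs on function boundaries would configure:
//
//   BufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);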

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}
397 
398 /// Determine which Values will alias with `opOperand` if the op is bufferized
399 /// in place. Return all tensor Values if the op is not bufferizable.
401  if (auto bufferizableOp =
402  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
403  return bufferizableOp.getAliasingValues(opOperand, *this);
404 
405  // The op is not bufferizable.
406  return detail::unknownGetAliasingValues(opOperand);
407 }
408 
409 /// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
410 /// op is not bufferizable.
412  if (auto bufferizableOp =
413  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
414  return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
415 
416  // Unknown op that returns a tensor. The inplace analysis does not support it.
417  // Conservatively return true.
418  return true;
419 }
420 
421 /// Return true if `opOperand` bufferizes to a memory write. Return
422 /// `true` if the op is not bufferizable.
424  if (auto bufferizableOp =
425  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
426  return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
427 
428  // Unknown op that returns a tensor. The inplace analysis does not support it.
429  // Conservatively return true.
430  return true;
431 }
432 
433 /// Return true if `opOperand` does neither read nor write but bufferizes to an
434 /// alias. Return false if the op is not bufferizable.
436  if (auto bufferizableOp =
437  getOptions().dynCastBufferizableOp(opOperand.getOwner()))
438  return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
439 
440  // Unknown op that returns a tensor. The inplace analysis does not support it.
441  // Conservatively return false.
442  return false;
443 }
444 
446  auto opResult = llvm::dyn_cast<OpResult>(value);
447  if (!opResult)
448  return true;
449  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
450  if (!bufferizableOp)
451  return true;
452  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
453 }
454 
455 /// Return true if the given value is read by an op that bufferizes to a memory
456 /// read. Also takes into account ops that create an alias but do not read by
457 /// themselves (e.g., ExtractSliceOp).
459  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
460  SmallVector<OpOperand *> workingSet;
461  DenseSet<OpOperand *> visited;
462  for (OpOperand &use : value.getUses())
463  workingSet.push_back(&use);
464 
465  while (!workingSet.empty()) {
466  OpOperand *uMaybeReading = workingSet.pop_back_val();
467  if (visited.contains(uMaybeReading))
468  continue;
469  visited.insert(uMaybeReading);
470 
471  // Skip over all ops that neither read nor write (but create an alias).
472  if (bufferizesToAliasOnly(*uMaybeReading))
473  for (AliasingValue alias : getAliasingValues(*uMaybeReading))
474  for (OpOperand &use : alias.value.getUses())
475  workingSet.push_back(&use);
476  if (bufferizesToMemoryRead(*uMaybeReading))
477  return true;
478  }
479 
480  return false;
481 }
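
// For intuition (illustrative IR, not from the upstream file): isValueRead(%t)
// is true below although tensor.extract_slice only creates an alias; the read
// happens through the aliasing value %s:
//
//   %s = tensor.extract_slice %t[0] [4] [1] : tensor<8xf32> to tensor<4xf32>
//   %e = tensor.extract %s[%c0] : tensor<4xf32>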

// Starting from `value`, follow the use-def chain in reverse, always selecting
// the aliasing OpOperands. Find and return Values for which `condition`
// evaluates to true. OpOperands of such matching Values are not traversed any
// further.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is not
        // equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
    }
  }

  return result;
}
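
// Illustrative usage (hypothetical, not from this file): walk the reverse
// use-def chain of a value `v` and collect aliasing block arguments:
//
//   SetVector<Value> bbArgs = state.findValueInReverseUseDefChain(
//       v, [](Value w) { return isa<BlockArgument>(w); }, TraversalConfig());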

// Find the values that define the contents of the given value.
llvm::SetVector<Value> AnalysisState::findDefinitions(Value value) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      value, [&](Value v) { return this->bufferizesToMemoryWrite(v); }, config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToMemrefOps are always in-place.
  if (isa<ToMemrefOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is "false".
  return false;
}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
  assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getMemref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
  if (failed(memrefType))
    return failure();
  ensureToMemrefOpIsValid(value, *memrefType);
  return rewriter
      .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
      .getResult();
}
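
// Illustrative usage (hypothetical, not from this file): inside a
// BufferizableOpInterface::bufferize() implementation, tensor operands are
// typically converted to buffers like this before the memref op is built:
//
//   FailureOr<Value> srcBuffer = getBuffer(rewriter, op.getSource(), options);
//   if (failed(srcBuffer))
//     return failure();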

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, invocationStack);

  // Op is not bufferizable.
  auto memSpace =
      options.defaultMemorySpaceFn(value.getType().cast<TensorType>());
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
}

bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert((llvm::isa<MemRefType>(replacement.getType()) ||
              llvm::isa<UnrankedMemRefType>(replacement.getType())) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}
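
// Illustrative usage (hypothetical, not from this file): a bufferize()
// implementation typically ends by forwarding the newly created memref
// results through this helper:
//
//   replaceOpWithBufferizedValues(rewriter, op, newMemrefOp->getResults());
//   return success();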

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return b
        .create<memref::AllocOp>(loc, type, dynShape,
                                 b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific IRMapping support with debugging.
//===----------------------------------------------------------------------===//

BaseMemRefType bufferization::getMemRefType(Value value,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  auto tensorType = llvm::cast<TensorType>(value.getType());

  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  return options.unknownTypeConverterFn(value, memorySpace, options);
}
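
// For intuition (illustrative, not from the upstream file): with the default
// unknown type converter, a ranked tensor maps to a fully dynamic layout:
//
//   tensor<4x?xf32>  ->  memref<4x?xf32, strided<[?, ?], offset: ?>>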

BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}
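
// For intuition (illustrative, not from the upstream file), contrasting the
// two helpers above for tensor<8x16xf32>:
//
//   getMemRefTypeWithFullyDynamicLayout   -> memref<8x16xf32,
//                                              strided<[?, ?], offset: ?>>
//   getMemRefTypeWithStaticIdentityLayout -> memref<8x16xf32>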

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//

bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
  // may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
  // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
  // case, the OpResult bufferizes to a memory write. E.g.:
  //
  // %0 = "some_writing_op" : tensor<?xf32>
  // %r = scf.if ... -> tensor<?xf32> {
  //   scf.yield %0 : tensor<?xf32>
  // } else {
  //   %1 = "another_writing_op"(%0) : tensor<?xf32>
  //   scf.yield %1 : tensor<?xf32>
  // }
  // "some_reading_op"(%r)
  //
  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
  // bufferizes to a memory write and the defining op is inside the scf.if.
  //
  // Note: This treatment of surrounding ops is useful for ops that have a
  // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
  // the analysis considerably.
  //
  // "another_writing_op" in the above example should be able to bufferize
  // inplace in the absence of another read of %0. However, if the scf.if op
  // would not be considered a "write", the analysis would detect the
  // following conflict:
  //
  // * read = some_reading_op
  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
  // * conflictingWrite = %1
  //
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand->get(),
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}

// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value))
    return bufferization::getMemRefType(value, options);

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState state(options);
  AliasingOpOperandList aliases = state.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(value.getType().cast<TensorType>());
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingOpOperandList r;
  for (OpOperand &operand : value.getDefiningOp()->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (bbArg.getType().isa<TensorType>())
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}