//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Debug.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}
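
// Example (sketch, assuming the scf dialect's implementation of this
// interface): a repetitive region is a region whose body may execute more
// than once, e.g., the body of the loop below. A buffer defined outside of
// such a region may therefore be read/written on every iteration:
//
//   %r = scf.for %iv = %lb to %ub step %s iter_args(%t = %init)
//       -> (tensor<?xf32>) {
//     ...
//     scf.yield %t2 : tensor<?xf32>
//   }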

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all visited regions: the repetitive region that they map to is
  // only known once the traversal has finished.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions: the repetitive region that they map to is
  // only known once the traversal has finished.
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

void AnalysisState::resetCache() { enclosingRepetitiveRegionCache.clear(); }

Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}

Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = b.create<ToTensorOp>(loc, shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape()))
          if (ShapedType::isDynamic(dim.value()))
            dynamicSizes.push_back(cast<Value>(shape[dim.index()]));
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
                                               copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
  if (copy)
    return allocTensorOp.getResult();
  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}
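
// Example (sketch): for a value %t : tensor<?x16xf32>, the IR created here
// looks roughly as follows. With `copy = true`, the extents are taken from
// the copied value:
//
//   %alloc = bufferization.alloc_tensor() copy(%t) : tensor<?x16xf32>
//
// With `copy = false`, the dynamic extents are computed first (reified or
// taken from tensor.dim ops) and the allocation has undefined contents:
//
//   %dim = tensor.dim %t, %c0 : tensor<?x16xf32>
//   %alloc = bufferization.alloc_tensor(%dim) : tensor<?x16xf32>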

LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &state) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (state.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !state.bufferizesToMemoryWrite(opOperand) &&
        state.getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
      // be smaller than the OpOperand (e.g., in the case of an extract_slice,
      // where the result is usually a smaller part of the source). Do not apply
      // this optimization if the OpResult is an unranked tensor (because those
      // cannot be copied at the moment).
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), state.getOptions(),
        copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, state.getOptions(),
        copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(
        llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
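
// Example (sketch): if the analysis decided that the destination operand of
// the tensor.insert below must bufferize out-of-place, the operand is
// replaced with a copy. Roughly:
//
//   %0 = tensor.insert %f into %t[%i] : tensor<?xf32>
//
// becomes
//
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
//   %0 = tensor.insert %f into %copy[%i] : tensor<?xf32>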

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

bool OpFilter::isOpAllowed(Operation *op) const {
  // All other ops: Allow/disallow according to filter.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // DENY filter matches. This op is not allowed. (Even if other ALLOW
        // filters may match.)
        return false;
    }
  }
  return isAllowed;
}
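
// Example (sketch, hypothetical configuration): an OpFilter that allows only
// ops from the tensor dialect, except tensor.empty, could be built as:
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>();
//   filter.denyOperation<tensor::EmptyOp>();
//   bool allowed = filter.isOpAllowed(op);
//
// A matching DENY rule always wins, even if an ALLOW rule also matches.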

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(type, memorySpace);
}
/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(Value value, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(
      llvm::cast<TensorType>(value.getType()), memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
      return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                  memorySpace);
    return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                              memorySpace);
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
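
// Example (sketch): the effect of the layout map option on a function
// argument of type tensor<?xf32>:
//
//   IdentityLayoutMap:     memref<?xf32>
//   FullyDynamicLayoutMap: memref<?xf32, strided<[?], offset: ?>>
//
// The fully dynamic variant is more permissive at call sites because it
// accepts buffers with arbitrary strides and offset.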

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}

/// Determine which Values will alias with `opOperand` if the op is bufferized
/// in place. Return all tensor Values if the op is not bufferizable.
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return false.
  return false;
}

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}

// Starting from `value`, follow the use-def chain in reverse, always selecting
// the aliasing OpOperands. Find and return Values for which `condition`
// evaluates to true. OpOperands of such matching Values are not traversed any
// further. The visited aliasing OpOperands are preserved through
// `visitedOpOperands`.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is not
        // equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}
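
// Example (sketch): given the chain below, calling
// findValueInReverseUseDefChain on %2 with a bufferizesToMemoryWrite
// condition (as findDefinitions below does) follows the aliasing OpOperands
// backwards through %1 and stops at %0, the first value that satisfies the
// condition:
//
//   %0 = "writing_op"(...) : ... -> tensor<?xf32>
//   %1 = tensor.extract_slice %0[...] ...
//   %2 = tensor.extract_slice %1[...] ...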

// Find the values that define the contents of the given value.
llvm::SetVector<Value> AnalysisState::findDefinitions(Value value) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      value, [&](Value v) { return this->bufferizesToMemoryWrite(v); }, config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToMemrefOps are always in-place.
  if (isa<ToMemrefOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is "false".
  return false;
}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
  assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getMemref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
  if (failed(memrefType))
    return failure();
  ensureToMemrefOpIsValid(value, *memrefType);
  return rewriter
      .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
      .getResult();
}
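
// Example (sketch): for a value %t : tensor<?xf32> that is not itself the
// result of a to_tensor op, this inserts roughly the following, assuming the
// default (fully dynamic layout) unknown type converter:
//
//   %m = bufferization.to_memref %t : memref<?xf32, strided<[?], offset: ?>>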

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, invocationStack);

  // Op is not bufferizable.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
}

bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert((llvm::isa<MemRefType>(replacement.getType()) ||
              llvm::isa<UnrankedMemRefType>(replacement.getType())) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}
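
// Example (sketch): replacing a bufferized op this way keeps remaining
// tensor users intact. Roughly:
//
//   %0 = "tensor_op"(...) : ... -> tensor<?xf32>
//
// becomes
//
//   %m = ... (memref result of the bufferized op) ...
//   %0 = bufferization.to_tensor %m : ...
//
// where the to_tensor op is expected to fold away as the remaining users are
// bufferized in turn.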

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return b
        .create<memref::AllocOp>(loc, type, dynShape,
                                 b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific IRMapping support with debugging.
//===----------------------------------------------------------------------===//

BaseMemRefType bufferization::getMemRefType(Value value,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  auto tensorType = llvm::cast<TensorType>(value.getType());

  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  return options.unknownTypeConverterFn(value, memorySpace, options);
}

BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
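
// Example (sketch): the fully dynamic layout keeps all strides and the offset
// unknown at compile time:
//
//   tensor<4x?xf32> -> memref<4x?xf32, strided<[?, ?], offset: ?>>
//
// In contrast, the static identity layout below maps the same tensor type to
// a plain memref<4x?xf32>.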

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//

bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the
  // OpResult may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
  // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
  // case, the OpResult bufferizes to a memory write. E.g.:
  //
  // %0 = "some_writing_op" : tensor<?xf32>
  // %r = scf.if ... -> tensor<?xf32> {
  //   scf.yield %0 : tensor<?xf32>
  // } else {
  //   %1 = "another_writing_op"(%0) : tensor<?xf32>
  //   scf.yield %1 : tensor<?xf32>
  // }
  // "some_reading_op"(%r)
  //
  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
  // bufferizes to a memory write and the defining op is inside the scf.if.
  //
  // Note: This treatment of surrounding ops is useful for ops that have a
  // region but no OpOperand such as scf.if or scf.execute_region. It
  // simplifies the analysis considerably.
  //
  // "another_writing_op" in the above example should be able to bufferize
  // inplace in the absence of another read of %0. However, if the scf.if op
  // would not be considered a "write", the analysis would detect the
  // following conflict:
  //
  // * read = some_reading_op
  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
  // * conflictingWrite = %1
  //
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand->get(),
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}

// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value))
    return bufferization::getMemRefType(value, options);

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState state(options);
  AliasingOpOperandList aliases = state.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(value, options, /*layout=*/{}, *memSpace);
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingOpOperandList r;
  for (OpOperand &operand : value.getDefiningOp()->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}