//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Debug.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

/// Return "true" if the given region is repetitive according to the
/// BufferizableOpInterface implementation of its parent op.
static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}

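/// Return the closest enclosing repetitive region around the given op, or
/// nullptr if there is none. The result is cached in
/// `enclosingRepetitiveRegionCache`; resetCache() invalidates it.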
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all visited regions so that they can all be mapped to the
  // repetitive region once it has been found.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions so that they can all be mapped to the
  // repetitive region once it has been found. (Without recording them here,
  // the cache-priming loop below would be a no-op.)
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = ::mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate results for both orderings of the ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}

SymbolTableCollection &BufferizationState::getSymbolTables() {
  return symbolTables;
}

Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}

Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
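/// For example (illustrative IR): copying `%t : tensor<?xf32>` produces
///   %0 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
/// whereas without `copy`, the dynamic extents are materialized explicitly:
///   %d = tensor.dim %t, %c0 : tensor<?xf32>
///   %0 = bufferization.alloc_tensor(%d) : tensor<?xf32>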
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = b.create<ToTensorOp>(
        loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First, try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape()))
          if (ShapedType::isDynamic(dim.value()))
            dynamicSizes.push_back(cast<Value>(shape[dim.index()]));
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
                                               copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if the 'copy' operand is
  // specified.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}

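/// Make this op bufferizable in-place: for every tensor OpOperand that the
/// analysis marked out-of-place, insert a copy (an alloc_tensor op) of either
/// the operand or, where profitable, its single aliasing OpResult.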
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
      // be smaller than the OpOperand (e.g., in the case of an extract_slice,
      // where the result is usually a smaller part of the source). Do not apply
      // this optimization if the OpResult is an unranked tensor (because those
      // cannot be copied at the moment).
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(
        llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

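// Example: a filter with an ALLOW entry for the tensor dialect and a DENY
// entry for tensor.empty allows every tensor op except tensor.empty; a DENY
// match always wins over any ALLOW match.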
315 
317  // All other ops: Allow/disallow according to filter.
318  bool isAllowed = !hasAllowRule();
319  for (const Entry &entry : entries) {
320  bool filterResult = entry.fn(op);
321  switch (entry.type) {
322  case Entry::ALLOW:
323  isAllowed |= filterResult;
324  break;
325  case Entry::DENY:
326  if (filterResult)
327  // DENY filter matches. This op is no allowed. (Even if other ALLOW
328  // filters may match.)
329  return false;
330  };
331  }
332  return isAllowed;
333 }

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(type, memorySpace);
}

/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

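/// Control the buffer types on function signatures. E.g., for a function
/// argument of type tensor<?xf32>:
///   * IdentityLayoutMap bufferizes it to memref<?xf32>;
///   * FullyDynamicLayoutMap bufferizes it to
///     memref<?xf32, strided<[?], offset: ?>>.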
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
      return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                  memorySpace);
    return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                              memorySpace);
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}

/// Determine which Values will alias with `opOperand` if the op is bufferized
/// in place. Return all tensor Values if the op is not bufferizable.
AliasingValueList
AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` neither reads nor writes but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return false.
  return false;
}

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
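/// Example: tensor.extract_slice neither reads nor writes its source, so in
///   %0 = tensor.extract_slice %t ...
///   %1 = tensor.extract %0[%i] ...
/// %t counts as "read": the traversal hops over the alias %0 and finds the
/// reading tensor.extract op.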
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}

// Starting from `opOperand`, follow the use-def chain in reverse, always
// selecting the aliasing OpOperands. Find and return Values for which
// `condition` evaluates to true. Uses of such matching Values are not
// traversed any further. The visited aliasing OpOperands are preserved through
// `visitedOpOperands`.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());

  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is not
        // equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}

// Find the values that define the contents of the given operand's value.
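// Example (assuming the ops implement BufferizableOpInterface): in
//   %0 = "writing_op"() : tensor<?xf32>
//   %1 = "alias_only_op"(%0) : tensor<?xf32>
// findDefinitions on a use of %1 skips the alias-only op and returns {%0},
// because only "writing_op" bufferizes to a memory write.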
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is
  // "false".
  return false;
}

// bufferization.to_buffer is not allowed to change the rank.
static void ensureToBufferOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
  assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_buffer would be invalid: mismatching ranks");
#endif
}

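/// Lookup the buffer for the given value: if the value was produced by a
/// bufferization.to_tensor op, its source buffer is returned directly;
/// otherwise a bufferization.to_buffer op is inserted right after the
/// definition of the value.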
FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert a to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();
  ensureToBufferOpIsValid(value, *bufferType);
  return rewriter
      .create<bufferization::ToBufferOp>(value.getLoc(), *bufferType, value)
      .getResult();
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state, invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}

bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

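/// Replace an op with the given bufferized values: tensor results are bridged
/// back to their existing uses with bufferization.to_tensor ops, which
/// typically fold away once all of their users are bufferized.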
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via memref.alloc.
  if (bufferAlignment != 0)
    return b
        .create<memref::AllocOp>(loc, type, dynShape,
                                 b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific MemRef type support.
//===----------------------------------------------------------------------===//

BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: No layout specified; let the unknown type converter decide.
  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}

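/// Return a MemRef type with a fully dynamic layout. If the given tensor type
/// is unranked, return an unranked MemRef type. E.g., tensor<4x?xf32> maps to
/// memref<4x?xf32, strided<[?, ?], offset: ?>>.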
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//

bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
  // may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
  // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
  // case, the OpResult bufferizes to a memory write. E.g.:
  //
  // %0 = "some_writing_op" : tensor<?xf32>
  // %r = scf.if ... -> tensor<?xf32> {
  //   scf.yield %0 : tensor<?xf32>
  // } else {
  //   %1 = "another_writing_op"(%0) : tensor<?xf32>
  //   scf.yield %1 : tensor<?xf32>
  // }
  // "some_reading_op"(%r)
  //
  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
  // bufferizes to a memory write and the defining op is inside the scf.if.
  //
  // Note: This treatment of surrounding ops is useful for ops that have a
  // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
  // the analysis considerably.
  //
  // "another_writing_op" in the above example should be able to bufferize
  // inplace in the absence of another read of %0. However, if the scf.if op
  // would not be considered a "write", the analysis would detect the
  // following conflict:
  //
  // * read = some_reading_op
  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
  // * conflictingWrite = %1
  //
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}

// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value))
    return bufferization::getMemRefType(tensorType, options);

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return asMemRefType(getBufferType(equivalentOperand, options,
                                      bufferizationState, invocationStack));
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(tensorType, options, /*layout=*/{}, *memSpace);
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingOpOperandList r;
  // Note: getOwnerOfValue (rather than Value::getDefiningOp) also covers entry
  // block arguments, whose owner is the parent op.
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}

FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}

bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}