//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"

#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "llvm/ADT/ScopeExit.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"

using namespace mlir;
using namespace bufferization;

static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}
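
// Illustrative example (for intuition, not part of the interface contract):
// the body region of an scf.for op is repetitive because it may execute more
// than once, so a buffer written in one iteration can be observed by a read
// in a later iteration.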

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all visited regions since we only know the repetitive region we
  // want to map them to later on.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions since we only know the repetitive region we
  // want to map them to later on.
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = ::mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate results for both orderings of the ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}

SymbolTableCollection &BufferizationState::getSymbolTables() {
  return symbolTables;
}

Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}
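
// Illustrative example: the body of an scf.forall op is a parallel region
// (iterations may execute concurrently), whereas the body of an scf.for op
// is repetitive but not parallel.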

Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape())) {
          if (ShapedType::isDynamic(dim.value())) {
            dynamicSizes.push_back(
                getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
          }
        }
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copy ? tensor : Value());

  // Add the 'memory_space' attribute. Not needed if the 'copy' operand is
  // specified.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}
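
// Illustrative pseudo-IR (exact assembly may differ by MLIR version): copying
// a value %t : tensor<?xf32> produces
//   %0 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
// while allocating without a copy reifies the dynamic extent first:
//   %d = tensor.dim %t, %c0 : tensor<?xf32>
//   %0 = bufferization.alloc_tensor(%d) : tensor<?xf32>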

LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can
      // sometimes be smaller than the OpOperand (e.g., in the case of an
      // extract_slice, where the result is usually a smaller part of the
      // source). Do not apply this optimization if the OpResult is an unranked
      // tensor (because those cannot be copied at the moment).
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(
        llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
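
// Illustrative example: an out-of-place tensor.insert whose destination %t is
// also read elsewhere is resolved by copying the OpOperand:
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
//   %0 = tensor.insert %f into %copy[%idx] : tensor<?xf32>
// For alias-only ops such as tensor.extract_slice, the (usually smaller)
// OpResult is copied instead of the source operand.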

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

bool OpFilter::isOpAllowed(Operation *op) const {
  // Allow or disallow the op according to the filter entries.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // DENY filter matches. This op is not allowed. (Even if other ALLOW
        // filters may match.)
        return false;
    }
  }
  return isAllowed;
}
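
// Illustrative example: given an ALLOW rule for the tensor dialect and a DENY
// rule for tensor.insert, tensor.extract is allowed and tensor.insert is
// rejected; a matching DENY rule always wins over matching ALLOW rules.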

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(type, memorySpace);
}
/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorType tensorType,
                                   Attribute memorySpace, func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
      return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                  memorySpace);
    return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                              memorySpace);
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
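
// Illustrative example: with LayoutMapOption::IdentityLayoutMap, a function
// argument of type tensor<?xf32> bufferizes to memref<?xf32>; with
// LayoutMapOption::FullyDynamicLayoutMap it bufferizes to
// memref<?xf32, strided<[?], offset: ?>>.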

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList
AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}

/// Determine which Values will alias with `opOperand` if the op is bufferized
/// in place. Return all tensor Values if the op is not bufferizable.
AliasingValueList
AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return false.
  return false;
}

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
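
// Illustrative example: %0 below is considered "read" even though
// tensor.extract_slice itself does not read; the traversal follows the
// alias-only op to the tensor.extract, which bufferizes to a memory read.
//   %1 = tensor.extract_slice %0[0] [4] [1] : tensor<8xf32> to tensor<4xf32>
//   %2 = tensor.extract %1[%idx] : tensor<4xf32>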

// Starting from `opOperand`, follow the use-def chain in reverse, always
// selecting the aliasing OpOperands. Find and return Values for which
// `condition` evaluates to true. Uses of such matching Values are not
// traversed any further; the visited aliasing OpOperands are collected in
// `visitedOpOperands`.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());

  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is not
        // equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}
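
// Illustrative example: starting from an OpOperand holding %2 with a
// condition that matches values defined by tensor.empty, the traversal
// follows the aliasing destination operands of the insert ops
// (%2 -> %1 -> %0) and returns {%0}:
//   %0 = tensor.empty() : tensor<8xf32>
//   %1 = tensor.insert %f into %0[%i] : tensor<8xf32>
//   %2 = tensor.insert %g into %1[%j] : tensor<8xf32>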

// Find the values that define the contents of the given operand's value.
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an allocation and a copy are
  // inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is
  // "false".
  return false;
}

// bufferization.to_buffer is not allowed to change the rank.
static void ensureToBufferOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
  assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_buffer would be invalid: mismatching ranks");
#endif
}

FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert a to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();
  ensureToBufferOpIsValid(value, *bufferType);
  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}
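
// Illustrative example: if %t was created by "%t = bufferization.to_tensor
// %m", getBuffer folds to %m directly; otherwise it inserts, roughly,
//   %buf = bufferization.to_buffer %t : tensor<?xf32> to memref<?xf32, ...>
// using the buffer type computed by getBufferType.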

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state,
                                        invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}

bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = bufferization::ToTensorOp::create(
          rewriter, replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}
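
// Illustrative pseudo-IR: replacing "%r = some_tensor_op" with a buffer %m
// yields "%r = bufferization.to_tensor %m", so remaining tensor users of %r
// keep type-checking until they are bufferized themselves.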

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific type conversion helpers.
//===----------------------------------------------------------------------===//

BaseMemRefType
bufferization::getMemRefType(TensorType tensorType,
                             const BufferizationOptions &options,
                             MemRefLayoutAttrInterface layout,
                             Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: Ranked memref type with unspecified layout. Delegate to the
  // unknown type converter.
  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}
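
// Illustrative examples: tensor<*xf32> maps to memref<*xf32>; a ranked tensor
// with an explicit `layout` keeps that layout; otherwise the result is chosen
// by options.unknownTypeConverterFn (fully dynamic layout by default).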

/// Return a MemRef type with fully dynamic layout. If the given tensor type
/// is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
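
// Illustrative example: tensor<4x?xf32> maps to
// memref<4x?xf32, strided<[?, ?], offset: ?>>; only the shape, element type,
// and memory space are fixed, while strides and offset remain dynamic.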

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}
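
// Illustrative example: tensor<4x?xf32> maps to the identity-layout type
// memref<4x?xf32>, and the unranked tensor<*xf32> maps to memref<*xf32>.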

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//

bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp =
      cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the
  // OpResult may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
  // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
  // case, the OpResult bufferizes to a memory write. E.g.:
  //
  // %0 = "some_writing_op" : tensor<?xf32>
  // %r = scf.if ... -> tensor<?xf32> {
  //   scf.yield %0 : tensor<?xf32>
  // } else {
  //   %1 = "another_writing_op"(%0) : tensor<?xf32>
  //   scf.yield %1 : tensor<?xf32>
  // }
  // "some_reading_op"(%r)
  //
  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
  // bufferizes to a memory write and the defining op is inside the scf.if.
  //
  // Note: This treatment of surrounding ops is useful for ops that have a
  // region but no OpOperand such as scf.if or scf.execute_region. It
  // simplifies the analysis considerably.
  //
  // "another_writing_op" in the above example should be able to bufferize
  // inplace in the absence of another read of %0. However, if the scf.if op
  // would not be considered a "write", the analysis would detect the
  // following conflict:
  //
  // * read = some_reading_op
  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
  // * conflictingWrite = %1
  //
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}

// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() !=
        &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingOpOperandList r;
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}

FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}

bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}