//===- BufferizableOpInterface.cpp - Bufferizable Ops ---=----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/Debug.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

/// Return the owner of the given value.
static Operation *getOwnerOfValue(Value value) {
  if (auto opResult = value.dyn_cast<OpResult>())
    return opResult.getDefiningOp();
  return value.cast<BlockArgument>().getOwner()->getParentOp();
}

bool bufferization::allocationDoesNotEscape(OpResult opResult) {
#ifndef NDEBUG
  auto bufferizableOp = opResult.getDefiningOp<BufferizableOpInterface>();
  assert(bufferizableOp && bufferizableOp.bufferizesToAllocation(opResult) &&
         "expected op that bufferizes to an allocation");
#endif // NDEBUG

  Operation *op = opResult.getDefiningOp();
  // If there is no 'escape' attribute, we cannot say for sure.
  if (!op->hasAttr(BufferizationDialect::kEscapeAttrName))
    return false;
  auto attr =
      op->getAttrOfType<ArrayAttr>(BufferizationDialect::kEscapeAttrName);
  return !attr[opResult.getResultNumber()].cast<BoolAttr>().getValue();
}
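
// Example (a sketch; assumes BufferizationDialect::kEscapeAttrName prints as
// "bufferization.escape"): an allocating op whose result is annotated as
// non-escaping, e.g.
//
//   %0 = bufferization.alloc_tensor() {bufferization.escape = [false]}
//       : tensor<4xf32>
//
// makes allocationDoesNotEscape return true for that result, so the allocation
// may be deallocated within its block. Without the attribute, the function
// conservatively returns false.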

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue, bool escape,
    const BufferizationOptions &options, bool copy) {
  Value tensor;
  if (shapedValue.getType().isa<RankedTensorType>()) {
    tensor = shapedValue;
  } else if (shapedValue.getType().isa<MemRefType>()) {
    tensor = b.create<ToTensorOp>(loc, shapedValue);
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = tensor.getType().cast<RankedTensorType>();
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (shapedValue.getType().isa<RankedTensorType>() &&
        shapedValue.isa<OpResult>()) {
      if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
              shapedValue.getDefiningOp())) {
        ReifiedRankedShapedTypeDims resultDims;
        if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
          reifiedShapes = true;
          auto &shape =
              resultDims[shapedValue.cast<OpResult>().getResultNumber()];
          for (const auto &dim : enumerate(tensorType.getShape()))
            if (ShapedType::isDynamic(dim.value()))
              dynamicSizes.push_back(shape[dim.index()]);
        }
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
                                               copy ? tensor : Value());
  allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName,
                         b.getBoolArrayAttr({escape}));

  // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
  if (copy)
    return allocTensorOp.getResult();
  FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
  if (failed(copyBufferType))
    return failure();
  allocTensorOp.setMemorySpaceAttr(
      b.getIntegerAttr(b.getIntegerType(64, /*isSigned=*/false),
                       copyBufferType->getMemorySpaceAsInt()));
  return allocTensorOp.getResult();
}
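
// Example (a sketch; the alloc_tensor assembly syntax below is written from
// memory and may not match the exact printed format): for `%t : tensor<?xf32>`
// this helper emits roughly
//
//   %alloc = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
//
// when `copy` is true, and
//
//   %d0 = tensor.dim %t, %c0 : tensor<?xf32>
//   %alloc = bufferization.alloc_tensor(%d0) : tensor<?xf32>
//
// when `copy` is false (dynamic sizes are reified or computed with dim ops, and
// the inferred memory space is attached as an attribute).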

LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &state) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  DenseSet<OpOperand *> escapingOpOperandCopies;
  SmallVector<OpResult> outOfPlaceOpResults;
  DenseSet<OpResult> copiedOpResults;
  DenseSet<OpResult> escapingOpResultCopies;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!operandType.isa<TensorType>())
      continue;
    if (state.isInPlace(opOperand))
      continue;
    if (operandType.isa<UnrankedTensorType>())
      return op->emitError("copies of unranked tensors are not supported");

    SmallVector<OpResult> aliasingOpResults =
        state.getAliasingOpResult(opOperand);
    // Is the result yielded from a block? Or are deallocations turned off
    // entirely? In either case, mark the allocation as "escaping", so that it
    // will not be deallocated.
    bool escape = !state.getOptions().createDeallocs ||
                  llvm::any_of(aliasingOpResults, [&](Value v) {
                    return state.isTensorYielded(v);
                  });

    if (aliasingOpResults.size() == 1 &&
        !state.bufferizesToMemoryWrite(opOperand) &&
        state.getAliasingOpOperand(aliasingOpResults.front()).size() == 1) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
      // be smaller than the OpOperand (e.g., in the case of an extract_slice,
      // where the result is usually a smaller part of the source).
      outOfPlaceOpResults.push_back(aliasingOpResults.front());
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpResults.insert(aliasingOpResults.front());
      if (escape)
        escapingOpResultCopies.insert(aliasingOpResults.front());
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!state.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
      if (escape)
        escapingOpOperandCopies.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(),
        escapingOpOperandCopies.contains(opOperand), state.getOptions(),
        copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.updateRootInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of OpResults.
  rewriter.setInsertionPointAfter(op);
  for (OpResult opResult : outOfPlaceOpResults) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opResult,
        escapingOpResultCopies.contains(opResult), state.getOptions(),
        copiedOpResults.count(opResult));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
        opResult.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() != copy->getDefiningOp())
        rewriter.updateRootInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
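
// Example (a sketch of the rewrite, not taken from a specific test): if the
// analysis marks the destination operand of a tensor.insert_slice as
// out-of-place, the operand is redirected to a new allocation that copies the
// original destination:
//
//   %r = tensor.insert_slice %src into %dest ...
//
// becomes roughly
//
//   %copy = bufferization.alloc_tensor() copy(%dest) ...
//   %r = tensor.insert_slice %src into %copy ...
//
// For alias-only ops such as tensor.extract_slice, the (usually smaller)
// OpResult is copied instead of the OpOperand, as described above.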

bool bufferization::shouldDeallocateOpResult(
    OpResult opResult, const BufferizationOptions &options) {
  Operation *op = opResult.getOwner();
  assert(options.dynCastBufferizableOp(op).bufferizesToAllocation(opResult) &&
         "expected that op allocates");

  AnalysisState analysisState(options);
  if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) {
    // AllocTensorOp has one result.
    ArrayAttr escapeAttr =
        op->getAttr(BufferizationDialect::kEscapeAttrName).cast<ArrayAttr>();
    return !escapeAttr[0].cast<BoolAttr>().getValue();
  }

  // No "escape" annotation found.
  if (options.createDeallocs) {
    // Perform an ad-hoc analysis.
    return !analysisState.isTensorYielded(opResult);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

bool OpFilter::isOpAllowed(Operation *op) const {
  // All other ops: Allow/disallow according to filter.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // DENY filter matches. This op is not allowed. (Even if other ALLOW
        // filters may match.)
        return false;
    }
  }
  return isAllowed;
}
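
// Illustrative usage (a sketch; assumes the templated OpFilter helpers such as
// allowDialect/denyOperation declared in the header):
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>(); // ALLOW entry
//   filter.denyOperation<tensor::GenerateOp>();   // DENY entry wins over ALLOW
//
// Once at least one ALLOW entry exists, an op is bufferized only if it matches
// some ALLOW entry and no DENY entry.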

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

/// Default unknown type converter: Use a fully dynamic layout map.
static BaseMemRefType
defaultUnknownTypeConverter(Value value, unsigned memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(value.getType().cast<TensorType>(),
                                             memorySpace);
}

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  if (!isOpAllowed(op))
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
    if (isOpAllowed(bufferizableOp.getOperation()))
      return bufferizableOp;
  return nullptr;
}

void BufferizationOptions::addDialectStateInitializer(
    StringRef name, const DialectStateInitFn &fn) {
  stateInitializers.push_back(
      [=](AnalysisState &state) { state.insertDialectState(name, fn()); });
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `result` if the op is bufferized
/// in place. Return an empty vector if the op is not bufferizable.
SmallVector<OpOperand *>
AnalysisState::getAliasingOpOperand(OpResult result) const {
  if (Operation *op = result.getDefiningOp())
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperand(result, *this);
  return {};
}

/// Determine which OpResult will alias with `opOperand` if the op is bufferized
/// in place. Return an empty vector if the op is not bufferizable.
SmallVector<OpResult>
AnalysisState::getAliasingOpResult(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingOpResult(opOperand, *this);
  return {};
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
/// op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return
/// `true` if the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return false.
  return false;
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool AnalysisState::isValueRead(Value value) const {
  assert(value.getType().isa<TensorType>() && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (OpResult opResult : getAliasingOpResult(*uMaybeReading))
        for (OpOperand &use : opResult.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
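
// Example (a sketch): in
//
//   %slice = tensor.extract_slice %t[0] [5] [1]
//            : tensor<10xf32> to tensor<5xf32>
//   %e = tensor.extract %slice[%c0] : tensor<5xf32>
//
// isValueRead(%t) returns true: the extract_slice itself only creates an alias,
// but the traversal continues into its result and finds the reading
// tensor.extract.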

// Starting from `value`, follow the use-def chain in reverse, always selecting
// the aliasing OpOperands. Find and return Values for which `condition`
// evaluates to true. OpOperands of such matching Values are not traversed any
// further.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition) const {
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();
    if (condition(value) || value.isa<BlockArgument>()) {
      result.insert(value);
      continue;
    }

    OpResult opResult = value.cast<OpResult>();
    SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
    if (opOperands.empty() || !options.isOpAllowed(value.getDefiningOp())) {
      result.insert(value);
      continue;
    }

    for (OpOperand *o : opOperands)
      workingSet.insert(o->get());
  }

  return result;
}

// Find the Values of the last preceding write of a given Value.
llvm::SetVector<Value>
AnalysisState::findLastPrecedingWrite(Value value) const {
  return findValueInReverseUseDefChain(value, [&](Value value) {
    Operation *op = value.getDefiningOp();
    if (!op)
      return true;
    auto bufferizableOp = options.dynCastBufferizableOp(op);
    if (!bufferizableOp)
      return true;
    return bufferizableOp.isMemoryWrite(value.cast<OpResult>(), *this);
  });
}
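
// Example (a sketch): in
//
//   %0 = linalg.fill ins(%cst : f32) outs(%t : tensor<10xf32>)
//        -> tensor<10xf32>
//   %1 = tensor.extract_slice %0[0] [5] [1] : tensor<10xf32> to tensor<5xf32>
//
// findLastPrecedingWrite(%1) walks from %1 through the aliasing OpOperand of
// the extract_slice to %0 and stops there, because the fill result is a memory
// write.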

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : options(options) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  SmallVector<OpResult> aliasingOpResults = getAliasingOpResult(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliasingOpResults,
                    [&](OpResult opResult) { return isValueRead(opResult); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToMemrefOps are always in-place.
  if (isa<ToMemrefOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may be
  // aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is "false".
  return false;
}

bool AnalysisState::isTensorYielded(Value tensor) const {
  // In the absence of analysis information, the conservative answer is "true".
  if (!tensor.getDefiningOp<AllocTensorOp>())
    return true;

  // For AllocTensorOp results, we can do better: They do not alias with any
  // preceding value, so we can follow SSA use-def chains and do a simple
  // analysis.
  SmallVector<OpOperand *> worklist;
  for (OpOperand &use : tensor.getUses())
    worklist.push_back(&use);

  while (!worklist.empty()) {
    OpOperand *operand = worklist.pop_back_val();
    Operation *op = operand->getOwner();

    // If the op is not bufferizable, we can safely assume that the value is not
    // yielded. (When bufferizing that op, it must handle such cases.)
    if (!options.dynCastBufferizableOp(op))
      continue;

    // We cannot analyze through ToMemrefOps, so we have to conservatively
    // assume that the value is yielded.
    if (isa<ToMemrefOp>(op))
      return true;

    // Check if the op is returning/yielding.
    if (isRegionReturnLike(op))
      return true;

    // Add all aliasing OpResults to the worklist.
    // Note: In the absence of detailed analysis information (e.g., there may be
    // no function call analysis information), this `getAliasingOpResult` is
    // conservative and may report additional OpResults as potentially aliasing.
    for (OpResult opResult : getAliasingOpResult(*operand))
      for (OpOperand &use : opResult.getUses())
        worklist.push_back(&use);
  }

  // No ReturnLike op found: The value is not yielded.
  return false;
}
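
// Example (a sketch; assumes the SCF dialect's bufferization interface is
// registered so that scf.yield is analyzed): an alloc_tensor result that
// reaches a region terminator is considered yielded, so its buffer must
// outlive the block:
//
//   %0 = bufferization.alloc_tensor() : tensor<4xf32>
//   scf.yield %0 : tensor<4xf32>
//
// Here isTensorYielded(%0) returns true; if %0 were only consumed by, say, a
// tensor.extract, it would return false.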

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options) {
#ifndef NDEBUG
  auto tensorType = value.getType().dyn_cast<TensorType>();
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getMemref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
  if (failed(memrefType))
    return failure();
  ensureToMemrefOpIsValid(value, *memrefType);
  return rewriter
      .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
      .getResult();
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options) {
  assert(value.getType().isa<TensorType>() && "unexpected non-tensor type");
  Operation *op = getOwnerOfValue(value);

  // ToTensorOp: Take buffer type directly from the op.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getMemref().getType().cast<BaseMemRefType>();

  // If value is a bbArg of a bufferizable op: query op interface.
  if (auto bbArg = value.dyn_cast<BlockArgument>())
    if (auto bufferizableOp =
            options.dynCastBufferizableOp(bbArg.getOwner()->getParentOp()))
      return bufferizableOp.getBufferType(bbArg, options);

  // Check if value is a new buffer allocation with a memory space attribute. In
  // that case we can at least infer the memory space.
  Optional<unsigned> memorySpace = None;
  if (auto opResult = value.dyn_cast<OpResult>()) {
    if (auto bufferizableOp =
            options.dynCastBufferizableOp(opResult.getDefiningOp())) {
      if (bufferizableOp.bufferizesToAllocation(opResult)) {
        FailureOr<unsigned> queriedMemorySpace =
            bufferizableOp.getMemorySpace(opResult);
        if (!failed(queriedMemorySpace))
          memorySpace = *queriedMemorySpace;
      }
    }
  }

  // If we still do not know the memory space, use the default memory space (if
  // any).
  if (!memorySpace.has_value())
    memorySpace = options.defaultMemorySpace;

  // If we still do not know the memory space, report a failure.
  if (!memorySpace.has_value())
    return op->emitError("could not infer memory space");

  return getMemRefType(value, options, /*layout=*/{}, *memorySpace);
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (opResult.getType().isa<TensorType>()) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert((replacement.getType().isa<MemRefType>() ||
              replacement.getType().isa<UnrankedMemRefType>()) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc/dealloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return b
        .create<memref::AllocOp>(loc, type, dynShape,
                                 b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
}
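
// Illustrative sketch (assumes the AllocationFn signature implied by the call
// above: OpBuilder &, Location, MemRefType, ValueRange, unsigned alignment):
// clients may override the default memref.alloc, e.g. to allocate on the
// stack:
//
//   options.allocationFn = [](OpBuilder &b, Location loc, MemRefType type,
//                             ValueRange dynShape,
//                             unsigned alignment) -> FailureOr<Value> {
//     // Alignment is ignored in this sketch.
//     return b.create<memref::AllocaOp>(loc, type, dynShape).getResult();
//   };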

/// Creates a memref deallocation. The given memref buffer must have been
/// allocated using `createAlloc`.
LogicalResult BufferizationOptions::createDealloc(OpBuilder &b, Location loc,
                                                  Value allocatedBuffer) const {
  if (deallocationFn)
    return (*deallocationFn)(b, loc, allocatedBuffer);

  // Default buffer deallocation via DeallocOp.
  b.create<memref::DeallocOp>(loc, allocatedBuffer);
  return success();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific BlockAndValueMapping support with debugging.
//===----------------------------------------------------------------------===//

bool bufferization::isFunctionArgument(Value value) {
  auto bbArg = value.dyn_cast<BlockArgument>();
  if (!bbArg)
    return false;
  return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
}

BaseMemRefType bufferization::getMemRefType(Value value,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            unsigned memorySpace) {
  auto tensorType = value.getType().cast<TensorType>();
  auto memorySpaceAttr = IntegerAttr::get(
      IntegerType::get(tensorType.getContext(), 64), memorySpace);

  // Case 1: Unranked memref type.
  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpaceAttr);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = tensorType.cast<RankedTensorType>();
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpaceAttr);
  }

  return options.unknownTypeConverterFn(value, memorySpace, options);
}

BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   unsigned memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto memorySpaceAttr = IntegerAttr::get(
      IntegerType::get(tensorType.getContext(), 64), memorySpace);
  auto rankedTensorType = tensorType.cast<RankedTensorType>();
  int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamicStrideOrOffset);
  AffineMap stridedLayout = makeStridedLinearLayoutMap(
      dynamicStrides, dynamicOffset, rankedTensorType.getContext());
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpaceAttr);
}

/// Return a MemRef type with a static identity layout (i.e., no layout map). If
/// the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     unsigned memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = tensorType.cast<RankedTensorType>();
  auto memorySpaceAttr = IntegerAttr::get(
      IntegerType::get(tensorType.getContext(), 64), memorySpace);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpaceAttr);
}
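
// Example (a sketch): for tensor<4x?xf32> in the default memory space,
// getMemRefTypeWithFullyDynamicLayout returns a memref<4x?xf32> whose layout is
// an affine map with fully dynamic strides and offset (every stride and the
// offset is a symbol), whereas getMemRefTypeWithStaticIdentityLayout returns a
// plain memref<4x?xf32> with the identity layout. The former is the safe
// default for unknown ops; the latter asserts that the buffer is contiguous
// with a zero offset.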