FuncBufferizableOpInterfaceImpl.cpp (MLIR 22.0.0git)
//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include <optional>

namespace mlir {
/// Return all func.return ops in the given function.
SmallVector<func::ReturnOp> bufferization::getReturnOps(func::FuncOp funcOp) {
  SmallVector<func::ReturnOp> result;
  for (Block &b : funcOp.getBody())
    if (auto returnOp = dyn_cast<func::ReturnOp>(b.getTerminator()))
      result.push_back(returnOp);
  return result;
}

namespace bufferization {
namespace func_ext {

void FuncAnalysisState::startFunctionAnalysis(FuncOp funcOp) {
  analyzedFuncOps[funcOp] = FuncOpAnalysisState::InProgress;
  auto createdEquiv = equivalentFuncArgs.try_emplace(funcOp, IndexMapping());
  auto createdAliasingResults =
      aliasingReturnVals.try_emplace(funcOp, IndexToIndexListMapping());
  auto createdRead = readBbArgs.try_emplace(funcOp, BbArgIndexSet());
  auto createdWritten = writtenBbArgs.try_emplace(funcOp, BbArgIndexSet());
  (void)createdEquiv;
  (void)createdAliasingResults;
  (void)createdRead;
  (void)createdWritten;
#ifndef NDEBUG
  assert(createdEquiv.second && "equivalence info exists already");
  assert(createdAliasingResults.second && "aliasing info exists already");
  assert(createdRead.second && "bbarg access info exists already");
  assert(createdWritten.second && "bbarg access info exists already");
#endif // NDEBUG
}

// Note: this is a local adaptor to unify TensorType and TensorLikeType code
// paths that both work with BufferizationOptions.
static mlir::Attribute
getDefaultMemorySpace(const BufferizationOptions &options,
                      TensorLikeType type) {
  if (auto tensorType = dyn_cast<TensorType>(type)) {
    return *options.defaultMemorySpaceFn(tensorType);
  }
  return nullptr;
}

/// Return the index-th bufferized function argument type. This assumes that
/// the specified argument is a tensor. If the tensor is ranked, a layout map
/// may be specified by the user (as per `options.functionArgTypeConverterFn`).
static BufferLikeType
getBufferizedFunctionArgType(FuncOp funcOp, int64_t index,
                             const BufferizationOptions &options) {
  auto type =
      dyn_cast<TensorLikeType>(funcOp.getFunctionType().getInput(index));
  assert(type && "expected TensorLikeType");

  // Note: For builtin tensors there is additional logic related to layout.
  if (auto tensorType = dyn_cast<TensorType>(type)) {
    BufferLikeType memrefType = options.functionArgTypeConverterFn(
        type, *options.defaultMemorySpaceFn(tensorType), funcOp, options);

    auto layoutAttr = funcOp.getArgAttrOfType<MemRefLayoutAttrInterface>(
        index, BufferizationDialect::kBufferLayoutAttrName);
    if (!layoutAttr)
      return memrefType;

    auto rankedMemrefType = dyn_cast<MemRefType>(memrefType);
    assert(rankedMemrefType &&
           "buffer layout not supported on unranked tensors");
    return cast<BufferLikeType>(MemRefType::get(
        rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
        layoutAttr, rankedMemrefType.getMemorySpace()));
  }

  return options.functionArgTypeConverterFn(type, /*memSpace=*/nullptr, funcOp,
                                            options);
}

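// Example (illustrative sketch, not from the original source): for a ranked
// tensor argument annotated with a layout attribute, the helper above rewrites
// the converted memref type to use that layout. Assuming
// `kBufferLayoutAttrName` is spelled "bufferization.buffer_layout", an
// argument such as
//
//   func.func @f(%arg0: tensor<4xf32>
//                {bufferization.buffer_layout = affine_map<(d0) -> (d0)>})
//
// would be bufferized to a memref<4xf32, affine_map<(d0) -> (d0)>> argument,
// with the memory space taken from `options.defaultMemorySpaceFn`.
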
/// Return the FuncOp called by `callOp`.
static FuncOp getCalledFunction(CallOpInterface callOp,
                                SymbolTableCollection &symbolTables) {
  SymbolRefAttr sym =
      llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
  if (!sym)
    return nullptr;
  return dyn_cast_or_null<FuncOp>(
      symbolTables.lookupNearestSymbolFrom(callOp, sym));
}

/// Return the FuncOp called by `callOp`.
static FuncOp getCalledFunction(CallOpInterface callOp,
                                const AnalysisState &state) {
  auto &oneShotAnalysisState = static_cast<const OneShotAnalysisState &>(state);

  if (auto *funcAnalysisState =
          oneShotAnalysisState.getExtension<FuncAnalysisState>()) {
    // Use the cached symbol tables.
    return getCalledFunction(callOp, funcAnalysisState->symbolTables);
  }

  SymbolTableCollection symbolTables;
  return getCalledFunction(callOp, symbolTables);
}

121 /// Get FuncAnalysisState.
122 static const FuncAnalysisState &
124  assert(isa<OneShotAnalysisState>(state) && "expected OneShotAnalysisState");
125  auto *result = static_cast<const OneShotAnalysisState &>(state)
126  .getExtension<FuncAnalysisState>();
127  assert(result && "FuncAnalysisState does not exist");
128  return *result;
129 }
130 
/// Return the state (phase) of analysis of the FuncOp.
static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
                                                  FuncOp funcOp) {
  if (!isa<OneShotAnalysisState>(state))
    return FuncOpAnalysisState::NotAnalyzed;
  auto *funcState = static_cast<const OneShotAnalysisState &>(state)
                        .getExtension<FuncAnalysisState>();
  if (!funcState)
    return FuncOpAnalysisState::NotAnalyzed;
  const auto &analyzedFuncOps = funcState->analyzedFuncOps;
  auto it = analyzedFuncOps.find(funcOp);
  if (it == analyzedFuncOps.end())
    return FuncOpAnalysisState::NotAnalyzed;
  return it->second;
}

/// Return the index of the bbArg in the given FuncOp that is equivalent to the
/// specified return value (if any).
static std::optional<int64_t>
getEquivalentFuncArgIdx(FuncOp funcOp, const FuncAnalysisState &state,
                        int64_t returnValIdx) {
  auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
  if (funcOpIt == state.equivalentFuncArgs.end())
    // No equivalence info stored for funcOp.
    return std::nullopt;

  auto retValIt = funcOpIt->getSecond().find(returnValIdx);
  if (retValIt == funcOpIt->getSecond().end())
    // Return value has no equivalent bbArg.
    return std::nullopt;

  return retValIt->getSecond();
}

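// Example (illustrative, hypothetical values): if the analysis recorded that
// return value #0 of @callee is equivalent to its block argument #1, i.e.
// `state.equivalentFuncArgs[callee] == {{0, 1}}`, then
// `getEquivalentFuncArgIdx(callee, state, /*returnValIdx=*/0)` yields 1, and
// any other return value index yields std::nullopt.
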
struct CallOpInterface
    : public BufferizableOpInterface::ExternalModel<CallOpInterface,
                                                    func::CallOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");

    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Assume that OpOperand is read.
      return true;

    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    return funcState.readBbArgs.lookup(funcOp).contains(
        opOperand.getOperandNumber());
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");

    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Assume that OpOperand is written.
      return true;

    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    return funcState.writtenBbArgs.lookup(funcOp).contains(
        opOperand.getOperandNumber());
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");
    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Any OpResult may be aliasing.
      return detail::unknownGetAliasingValues(opOperand);

    // Get aliasing results from state.
    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    auto aliasingReturnVals =
        funcState.aliasingReturnVals.lookup(funcOp).lookup(
            opOperand.getOperandNumber());

    // Check if the aliasing OpResult is equivalent to the OpOperand.
    std::optional<int64_t> equivalent = {};
    if (aliasingReturnVals.size() == 1) {
      equivalent = getEquivalentFuncArgIdx(funcOp, funcState,
                                           aliasingReturnVals.front());
      assert((!equivalent.has_value() ||
              *equivalent == opOperand.getOperandNumber()) &&
             "inconsistent analysis state");
    }
    AliasingValueList result;
    for (int64_t resultIdx : aliasingReturnVals)
      result.addAlias({callOp->getOpResult(resultIdx),
                       equivalent.has_value() ? BufferRelation::Equivalent
                                              : BufferRelation::Unknown,
                       /*isDefinite=*/equivalent.has_value()});
    return result;
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value, const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto callOp = cast<func::CallOp>(op);

    // TODO: Avoid recomputing the symbol tables every time.
    SymbolTableCollection symbolTable;

    FuncOp funcOp = getCalledFunction(callOp, symbolTable);
    assert(funcOp && "expected CallOp to a FuncOp");

    // If the callee was already bufferized, we can directly take the type from
    // its signature.
    FunctionType funcType = funcOp.getFunctionType();
    Type resultType =
        funcType.getResult(cast<OpResult>(value).getResultNumber());
    if (auto bufferizedType = dyn_cast<BufferLikeType>(resultType))
      return bufferizedType;

    // Otherwise, call the type converter to compute the bufferized type.
    auto tensorType = cast<TensorLikeType>(resultType);
    return cast<BufferLikeType>(options.functionArgTypeConverterFn(
        tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
        options));
  }

  /// All function arguments are writable. It is the responsibility of the
  /// CallOp to insert buffer copies where necessary.
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);

    // 1. Compute the result types of the new CallOp.
    SmallVector<Type> resultTypes;
    for (Value result : callOp.getResults()) {
      Type returnType = result.getType();
      if (!isa<TensorLikeType>(returnType)) {
        // Non-tensor values are returned.
        resultTypes.push_back(returnType);
        continue;
      }

      // Returning a memref.
      FailureOr<BufferLikeType> resultType =
          bufferization::getBufferType(result, options, state);
      if (failed(resultType))
        return failure();
      resultTypes.push_back(*resultType);
    }

    // 2. Rewrite tensor operands as memrefs based on the type of the already
    // bufferized callee.
    SmallVector<Value> newOperands;

    FuncOp funcOp = getCalledFunction(callOp, state.getSymbolTables());
    assert(funcOp && "expected CallOp to a FuncOp");
    FunctionType funcType = funcOp.getFunctionType();

    for (OpOperand &opOperand : callOp->getOpOperands()) {
      // Non-tensor operands are just copied.
      if (!isa<TensorLikeType>(opOperand.get().getType())) {
        newOperands.push_back(opOperand.get());
        continue;
      }

      // Retrieve buffers for tensor operands.
      FailureOr<Value> maybeBuffer =
          getBuffer(rewriter, opOperand.get(), options, state);
      if (failed(maybeBuffer))
        return failure();
      Value buffer = *maybeBuffer;

      // Caller / callee type mismatch is handled with castOrReallocMemRefValue.
      auto bufferType = funcType.getInput(opOperand.getOperandNumber());
      if (!isa<BufferLikeType>(bufferType)) {
        // The called function was not bufferized yet. This can happen when
        // there are cycles in the function call graph. Compute the bufferized
        // result type.
        FailureOr<BufferLikeType> maybeBufferType =
            bufferization::getBufferType(
                funcOp.getArgument(opOperand.getOperandNumber()), options,
                state);
        if (failed(maybeBufferType))
          return failure();
        bufferType = *maybeBufferType;
      }

      // Since we don't yet have a clear layout story, to_buffer may
      // conservatively turn tensors into more dynamic memrefs than necessary.
      // If the buffer does not have the memref type expected by the callee,
      // introduce an extra memref.cast that will either canonicalize away or
      // fail compilation until we can do something better. Insert a
      // reallocation + copy if it cannot be statically guaranteed that a
      // direct cast would be valid.
      if (buffer.getType() != bufferType) {
        auto memrefDstType = dyn_cast<MemRefType>(bufferType);
        assert(memrefDstType &&
               "buffer layout not supported on unranked tensors");
        FailureOr<Value> replacement = bufferization::castOrReallocMemRefValue(
            rewriter, buffer, memrefDstType, options);
        if (failed(replacement))
          return failure();
        buffer = *replacement;
      }
      newOperands.push_back(buffer);
    }

    // 3. Create the new CallOp.
    Operation *newCallOp =
        func::CallOp::create(rewriter, callOp.getLoc(), funcOp.getSymName(),
                             resultTypes, newOperands);
    newCallOp->setAttrs(callOp->getAttrs());

    // 4. Replace the old op with the new op.
    replaceOpWithBufferizedValues(rewriter, callOp, newCallOp->getResults());

    return success();
  }
};

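// Example (illustrative sketch, not from the original source): with the model
// above, a call whose callee has already been bufferized to take and return
// memrefs, e.g.
//
//   %0 = func.call @callee(%t) : (tensor<?xf32>) -> tensor<?xf32>
//
// is (roughly) rewritten to
//
//   %b = bufferization.to_buffer %t ...
//   %1 = func.call @callee(%b) : (memref<?xf32, ...>) -> memref<?xf32, ...>
//
// possibly with an extra memref.cast or a reallocation + copy if the operand's
// buffer type does not match the callee signature. The exact memref layouts
// depend on `options.functionArgTypeConverterFn`.
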
struct ReturnOpInterface
    : public BufferizableOpInterface::ExternalModel<ReturnOpInterface,
                                                    func::ReturnOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
#ifndef NDEBUG
    auto returnOp = cast<func::ReturnOp>(op);
    assert(isa<FuncOp>(returnOp->getParentOp()) &&
           "only support FuncOp parent for ReturnOp");
#endif // NDEBUG

    // ReturnOps are bufferized as part of FuncOps.
    return success();
  }
};

struct FuncOpInterface
    : public OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel<
          FuncOpInterface, FuncOp> {

  static bool supportsUnstructuredControlFlow() { return true; }

  bool hasTensorSemantics(Operation *op) const {
    auto isaTensor = llvm::IsaPred<TensorLikeType>;

    // A function has tensor semantics if it has tensor arguments/results.
    auto funcOp = cast<FuncOp>(op);
    bool hasTensorArg = any_of(funcOp.getArgumentTypes(), isaTensor);
    bool hasTensorResult = any_of(funcOp.getResultTypes(), isaTensor);
    if (hasTensorArg || hasTensorResult)
      return true;

    // It also has tensor semantics if it has tensor block arguments.
    // TODO: Decouple bufferization of unstructured control flow from
    // BufferizableOpInterface implementations. We should only care about
    // region entry block arguments here (which are already covered by the
    // argument types of the function).
    for (Block &block : funcOp.getBody())
      if (any_of(block.getArgumentTypes(), isaTensor))
        return true;

    return false;
  }

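  // Example (illustrative): a function such as
  //   func.func @fn(%arg0: tensor<8xf32>) -> f32
  // has tensor semantics (tensor argument), whereas
  //   func.func @fn(%arg0: memref<8xf32>) -> f32
  // does not, unless one of its non-entry blocks carries a tensor-typed block
  // argument.
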
  AliasingOpOperandList getAliasingOpOperands(Operation *op, Value value,
                                              const AnalysisState &state) const {
    return getAliasingBranchOpOperands(op, cast<BlockArgument>(value), state);
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value, const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto funcOp = cast<FuncOp>(op);
    auto bbArg = cast<BlockArgument>(value);

    // Function arguments are special.
    if (bbArg.getOwner() == &funcOp.getBody().front())
      return getBufferizedFunctionArgType(funcOp, bbArg.getArgNumber(),
                                          options);

    return OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel::
        getBufferType(op, value, options, state, invocationStack);
  }

  /// Rewrite function bbArgs and return values into buffer form. This function
  /// bufferizes the function signature and the ReturnOp. When the entire
  /// function body has been bufferized, function return types can be switched
  /// to more concise memref types as part of `foldMemRefCasts`.
  ///
  /// All function bbArgs are writable unless they are explicitly marked as
  /// read-only. Callers must insert copies when needed.
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto funcOp = cast<FuncOp>(op);
    FunctionType funcType = funcOp.getFunctionType();

    // Compute the argument types.
    SmallVector<Type> argTypes;
    for (const auto &it : llvm::enumerate(funcType.getInputs())) {
      Type argType = it.value();
      if (isa<TensorLikeType>(argType)) {
        argTypes.push_back(
            getBufferizedFunctionArgType(funcOp, it.index(), options));
        continue;
      }
      argTypes.push_back(argType);
    }

    // Compute the result types.
    SmallVector<Type> retTypes;
    for (Type resultType : funcType.getResults()) {
      if (auto tensorType = dyn_cast<TensorLikeType>(resultType)) {
        BufferLikeType resultType = options.functionArgTypeConverterFn(
            tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
            options);
        retTypes.push_back(resultType);
        continue;
      }
      retTypes.push_back(resultType);
    }

    // Compute the new function type.
    auto newFuncType = FunctionType::get(op->getContext(), argTypes, retTypes);

    // If the function has no body, set the new function type and we are done.
    if (funcOp.isExternal()) {
      funcOp.setType(newFuncType);
      return success();
    }

    // 1. Bufferize every block.
    for (Block &block : funcOp.getBody())
      if (failed(bufferization::bufferizeBlockSignature(&block, rewriter,
                                                        options, state)))
        return failure();

    // 2. Bufferize the operands of all return ops.
    for (func::ReturnOp returnOp : getReturnOps(funcOp)) {
      assert(returnOp->getNumOperands() == retTypes.size() &&
             "incorrect number of return values");
      SmallVector<Value> returnValues;
      for (auto [returnVal, bufferizedType] :
           llvm::zip_equal(returnOp->getOperands(), retTypes)) {
        auto tensorType = dyn_cast<TensorLikeType>(returnVal.getType());
        rewriter.setInsertionPoint(returnOp);

        // If not a tensor type just forward it.
        if (!tensorType) {
          returnValues.push_back(returnVal);
          continue;
        }

        // Note: If `inferFunctionResultLayout = true`, casts are later folded
        // away.
        Value toBufferOp = bufferization::ToBufferOp::create(
            rewriter, returnOp.getLoc(), bufferizedType, returnVal);
        returnValues.push_back(toBufferOp);
      }

      returnOp.getOperandsMutable().assign(returnValues);
    }

    // 3. Set the new function type.
    funcOp.setType(newFuncType);
    return success();
  }

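  // Example (illustrative sketch, not from the original source): with default
  // options, bufferizing
  //
  //   func.func @fn(%t: tensor<?xf32>) -> tensor<?xf32> {
  //     func.return %t : tensor<?xf32>
  //   }
  //
  // rewrites the signature to memref types and materializes the returned
  // buffer via bufferization.to_buffer, roughly:
  //
  //   func.func @fn(%m: memref<?xf32, strided<[?], offset: ?>>)
  //       -> memref<?xf32, strided<[?], offset: ?>> { ... }
  //
  // The exact layouts depend on `options.functionArgTypeConverterFn` and
  // `inferFunctionResultLayout`.
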
  /// Return `true` if the given function argument is writable.
  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    auto funcOp = cast<FuncOp>(op);
    BlockArgument bbArg = dyn_cast<BlockArgument>(value);
    assert(bbArg && "expected BlockArgument");

    // Non-entry block arguments are always writable. (They may alias with
    // values that are not writable, which will turn them into read-only.)
    if (bbArg.getOwner() != &funcOp.getBody().front())
      return true;

    // "bufferization.writable" overrides other writability decisions. This is
    // currently used for testing only.
    if (BoolAttr writable = funcOp.getArgAttrOfType<BoolAttr>(
            bbArg.getArgNumber(), BufferizationDialect::kWritableAttrName))
      return writable.getValue();

    // All function arguments are writable by default.
    return true;
  }
};

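// Example (illustrative): marking an entry block argument with the
// "bufferization.writable" attribute, e.g.
//
//   func.func @fn(%t: tensor<?xf32> {bufferization.writable = false}) ...
//
// makes `isWritable` return false for that argument, so the analysis treats it
// as read-only and inserts a copy before any in-place write to it.
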
} // namespace func_ext
} // namespace bufferization
} // namespace mlir

void mlir::bufferization::func_ext::
    registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, func::FuncDialect *dialect) {
    func::CallOp::attachInterface<func_ext::CallOpInterface>(*ctx);
    func::FuncOp::attachInterface<func_ext::FuncOpInterface>(*ctx);
    func::ReturnOp::attachInterface<func_ext::ReturnOpInterface>(*ctx);
  });
}
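// Example (illustrative sketch, assuming a pass or tool sets up its own
// DialectRegistry): the external models above must be registered before
// running One-Shot Bufferize so that func.func, func.call, and func.return
// participate in function-boundary bufferization:
//
//   DialectRegistry registry;
//   registry.insert<func::FuncDialect, bufferization::BufferizationDialect>();
//   bufferization::func_ext::registerBufferizableOpInterfaceExternalModels(
//       registry);
//   MLIRContext context(registry);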