//===- OneShotModuleBufferize.cpp - Bufferization across Func. Boundaries ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Module Bufferization is an extension of One-Shot Bufferize that
// bufferizes function boundaries. It provides `BufferizableOpInterface`
// implementations for FuncOp, CallOp and ReturnOp.
//
// Module Bufferization is run via `runOneShotModuleBufferize(ModuleOp, ...)`.
// This function analyzes the given module and determines the order of analysis
// and bufferization: Functions that are called are processed before their
// respective callers.
//
// After analyzing a FuncOp, additional information about its bbArgs is
// gathered and stored in `FuncAnalysisState`.
//
// * `aliasingFuncOpBBArgsAnalysis` determines the equivalent/aliasing bbArgs
//   for each tensor return value (if any).
// * `funcOpBbArgReadWriteAnalysis` determines whether or not a tensor bbArg is
//   read/written.
//
// Module Bufferization implements the following calling convention.
//
// * In the absence of conflicts within a FuncOp, the FuncOp's bbArgs may
//   always be written to in-place.
// * If a tensor operand of a CallOp is read after the CallOp, the operand of
//   the CallOp must bufferize out-of-place.
//
// Example: The tensor.insert op bufferizes in-place because it is allowed to
// modify the buffer of `%t1` directly. The CallOp in `caller` must bufferize
// out-of-place because `%t0` is modified by the callee but read by the
// tensor.extract op. The analysis of CallOps decides whether an OpOperand must
// bufferize out-of-place based on results of `funcOpBbArgReadWriteAnalysis`.
// ```
// func @callee(%t1 : tensor<?xf32>) -> tensor<?xf32> {
//   %f = ... : f32
//   %0 = tensor.insert %f into %t1[...] : tensor<?xf32>
//   return %0 : tensor<?xf32>
// }
//
// func @caller() -> () {
//   %t0 = ... : tensor<?xf32>
//   %1 = call @callee(%t0) : (tensor<?xf32>) -> (tensor<?xf32>)
//   %2 = tensor.extract %1[...] : tensor<?xf32>
// }
// ```
//
// Note: If a function is external, `funcOpBbArgReadWriteAnalysis` cannot
// analyze the function body. In such a case, the CallOp analysis conservatively
// assumes that each tensor OpOperand is both read and written.
//
// TODO: Add FuncOp attributes so that bbArgs of external FuncOps can be marked
// as "not reading" and/or "not writing".
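//
// Note: `funcOpBbArgReadWriteAnalysis` below already honors an explicit
// access annotation on function arguments where present (the
// `kBufferAccessAttrName` attribute). Illustrative example, assuming the
// attribute prints as `bufferization.access`:
// ```
// func.func private @external_fn(%t : tensor<?xf32>
//     {bufferization.access = "read"}) -> tensor<?xf32>
// ```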
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"

using namespace mlir;
using namespace mlir::bufferization;
using namespace mlir::bufferization::func_ext;

/// A mapping of FuncOps to their callers.
using FuncCallerMap = DenseMap<func::FuncOp, DenseSet<Operation *>>;

/// Get or create FuncAnalysisState.
static FuncAnalysisState &
getOrCreateFuncAnalysisState(OneShotAnalysisState &state) {
  auto *result = state.getExtension<FuncAnalysisState>();
  if (result)
    return *result;
  return state.addExtension<FuncAnalysisState>();
}

/// Return the unique ReturnOp that terminates `funcOp`.
/// Return nullptr if there is no such unique ReturnOp.
static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
  func::ReturnOp returnOp;
  for (Block &b : funcOp.getBody()) {
    if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
      if (returnOp)
        return nullptr;
      returnOp = candidateOp;
    }
  }
  return returnOp;
}

namespace {

/// Annotate IR with the results of the analysis. For testing purposes only.
static void annotateEquivalentReturnBbArg(OpOperand &returnVal,
                                          BlockArgument bbArg) {
  const char *kEquivalentArgsAttr = "__equivalent_func_args__";
  Operation *op = returnVal.getOwner();

  SmallVector<int64_t> equivBbArgs;
  if (op->hasAttr(kEquivalentArgsAttr)) {
    auto attr = cast<ArrayAttr>(op->getAttr(kEquivalentArgsAttr));
    equivBbArgs = llvm::to_vector<4>(llvm::map_range(attr, [](Attribute a) {
      return cast<IntegerAttr>(a).getValue().getSExtValue();
    }));
  } else {
    equivBbArgs.append(op->getNumOperands(), -1);
  }
  equivBbArgs[returnVal.getOperandNumber()] = bbArg.getArgNumber();

  OpBuilder b(op->getContext());
  op->setAttr(kEquivalentArgsAttr, b.getI64ArrayAttr(equivBbArgs));
}
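
// Example (illustrative): with `testAnalysisOnly` enabled, a return op whose
// second operand bufferizes to a buffer equivalent to bbArg #0 (and whose
// first operand is not equivalent to any bbArg) is annotated as:
//   return %a, %b {__equivalent_func_args__ = [-1, 0]} : tensor<?xf32>, ...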

/// Store function BlockArguments that are equivalent to/aliasing a returned
/// value in FuncAnalysisState.
static LogicalResult
aliasingFuncOpBBArgsAnalysis(FuncOp funcOp, OneShotAnalysisState &state,
                             FuncAnalysisState &funcState) {
  if (funcOp.getBody().empty()) {
    // No function body available. Conservatively assume that every tensor
    // return value may alias with any tensor bbArg.
    FunctionType type = funcOp.getFunctionType();
    for (const auto &inputIt : llvm::enumerate(type.getInputs())) {
      if (!isa<TensorType>(inputIt.value()))
        continue;
      for (const auto &resultIt : llvm::enumerate(type.getResults())) {
        if (!isa<TensorType>(resultIt.value()))
          continue;
        int64_t returnIdx = resultIt.index();
        int64_t bbArgIdx = inputIt.index();
        funcState.aliasingReturnVals[funcOp][bbArgIdx].push_back(returnIdx);
      }
    }
    return success();
  }

  // Support only a single return-terminated block in the function.
  func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
  assert(returnOp && "expected func with single return op");

  for (OpOperand &returnVal : returnOp->getOpOperands())
    if (isa<RankedTensorType>(returnVal.get().getType()))
      for (BlockArgument bbArg : funcOp.getArguments())
        if (isa<RankedTensorType>(bbArg.getType())) {
          int64_t returnIdx = returnVal.getOperandNumber();
          int64_t bbArgIdx = bbArg.getArgNumber();
          if (state.areEquivalentBufferizedValues(returnVal.get(), bbArg)) {
            funcState.equivalentFuncArgs[funcOp][returnIdx] = bbArgIdx;
            if (state.getOptions().testAnalysisOnly)
              annotateEquivalentReturnBbArg(returnVal, bbArg);
          }
          if (state.areAliasingBufferizedValues(returnVal.get(), bbArg))
            funcState.aliasingReturnVals[funcOp][bbArgIdx].push_back(returnIdx);
        }

  return success();
}

/// Annotate the function argument at index `idx` with the analyzed access
/// type ("read", "write", "read-write" or "none"). For testing purposes only.
static void annotateFuncArgAccess(func::FuncOp funcOp, int64_t idx, bool isRead,
                                  bool isWritten) {
  OpBuilder b(funcOp.getContext());
  Attribute accessType;
  if (isRead && isWritten) {
    accessType = b.getStringAttr("read-write");
  } else if (isRead) {
    accessType = b.getStringAttr("read");
  } else if (isWritten) {
    accessType = b.getStringAttr("write");
  } else {
    accessType = b.getStringAttr("none");
  }
  funcOp.setArgAttr(idx, BufferizationDialect::kBufferAccessAttrName,
                    accessType);
}
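
// Example (illustrative): a bbArg that is both read and written is annotated
// as `%arg0: tensor<?xf32> {bufferization.access = "read-write"}`, assuming
// `kBufferAccessAttrName` prints as `bufferization.access`.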

/// Determine which FuncOp bbArgs are read and which are written. When run on a
/// function with unknown ops, we conservatively assume that such ops bufferize
/// to a read + write.
static LogicalResult
funcOpBbArgReadWriteAnalysis(FuncOp funcOp, OneShotAnalysisState &state,
                             FuncAnalysisState &funcState) {
  for (int64_t idx = 0, e = funcOp.getFunctionType().getNumInputs(); idx < e;
       ++idx) {
    // Skip non-tensor arguments.
    if (!isa<TensorType>(funcOp.getFunctionType().getInput(idx)))
      continue;
    bool isRead;
    bool isWritten;
    if (auto accessAttr = funcOp.getArgAttrOfType<StringAttr>(
            idx, BufferizationDialect::kBufferAccessAttrName)) {
      // Buffer access behavior is specified on the function. Skip the
      // analysis.
      StringRef str = accessAttr.getValue();
      isRead = str == "read" || str == "read-write";
      isWritten = str == "write" || str == "read-write";
    } else if (funcOp.getBody().empty()) {
      // If the function has no body, conservatively assume that all args are
      // read + written.
      isRead = true;
      isWritten = true;
    } else {
      // Analyze the body of the function.
      BlockArgument bbArg = funcOp.getArgument(idx);
      isRead = state.isValueRead(bbArg);
      isWritten = state.isValueWritten(bbArg);
    }

    if (state.getOptions().testAnalysisOnly)
      annotateFuncArgAccess(funcOp, idx, isRead, isWritten);
    if (isRead)
      funcState.readBbArgs[funcOp].insert(idx);
    if (isWritten)
      funcState.writtenBbArgs[funcOp].insert(idx);
  }

  return success();
}
} // namespace

/// Remove bufferization attributes on FuncOp arguments.
static void removeBufferizationAttributes(BlockArgument bbArg) {
  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
  funcOp.removeArgAttr(bbArg.getArgNumber(),
                       BufferizationDialect::kBufferLayoutAttrName);
  funcOp.removeArgAttr(bbArg.getArgNumber(),
                       BufferizationDialect::kWritableAttrName);
}

/// Return the func::FuncOp called by `callOp`.
static func::FuncOp getCalledFunction(func::CallOp callOp) {
  SymbolRefAttr sym =
      llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
  if (!sym)
    return nullptr;
  return dyn_cast_or_null<func::FuncOp>(
      SymbolTable::lookupNearestSymbolFrom(callOp, sym));
}

/// Gather equivalence info of CallOps.
/// Note: This only adds new equivalence info if the called function was
/// already analyzed.
// TODO: This does not handle cyclic function call graphs etc.
static void equivalenceAnalysis(func::FuncOp funcOp,
                                OneShotAnalysisState &state,
                                FuncAnalysisState &funcState) {
  funcOp->walk([&](func::CallOp callOp) {
    func::FuncOp calledFunction = getCalledFunction(callOp);
    assert(calledFunction && "could not retrieve called func::FuncOp");

    // No equivalence info available for the called function.
    if (!funcState.equivalentFuncArgs.count(calledFunction))
      return WalkResult::skip();

    for (auto it : funcState.equivalentFuncArgs[calledFunction]) {
      int64_t returnIdx = it.first;
      int64_t bbArgIdx = it.second;
      if (!state.isInPlace(callOp->getOpOperand(bbArgIdx)))
        continue;
      Value returnVal = callOp.getResult(returnIdx);
      Value argVal = callOp->getOperand(bbArgIdx);
      state.unionEquivalenceClasses(returnVal, argVal);
    }

    return WalkResult::advance();
  });
}

/// Return "true" if the given function signature has tensor semantics.
static bool hasTensorSignature(func::FuncOp funcOp) {
  auto isaTensor = [](Type t) { return isa<TensorType>(t); };
  return llvm::any_of(funcOp.getFunctionType().getInputs(), isaTensor) ||
         llvm::any_of(funcOp.getFunctionType().getResults(), isaTensor);
}

/// Store all functions of the `moduleOp` in `orderedFuncOps`, sorted by
/// callee-caller order (i.e. callees without callers first).
/// Store the map of FuncOp to all its callers in `callerMap`.
/// Return `failure()` if a cycle of calls is detected or if we are unable to
/// retrieve the called FuncOp from any func::CallOp.
static LogicalResult
getFuncOpsOrderedByCalls(ModuleOp moduleOp,
                         SmallVectorImpl<func::FuncOp> &orderedFuncOps,
                         FuncCallerMap &callerMap) {
  // For each FuncOp, the set of functions called by it (i.e. the union of
  // symbols of all nested func::CallOp).
  DenseMap<func::FuncOp, DenseSet<func::FuncOp>> calledBy;
  // For each FuncOp, the number of func::CallOp it contains.
  DenseMap<func::FuncOp, unsigned> numberCallOpsContainedInFuncOp;
  WalkResult res = moduleOp.walk([&](func::FuncOp funcOp) -> WalkResult {
    if (!funcOp.getBody().empty()) {
      func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
      if (!returnOp)
        return funcOp->emitError()
               << "cannot bufferize a FuncOp with tensors and "
                  "without a unique ReturnOp";
    }

    // Collect function calls and populate the caller map.
    numberCallOpsContainedInFuncOp[funcOp] = 0;
    return funcOp.walk([&](func::CallOp callOp) -> WalkResult {
      func::FuncOp calledFunction = getCalledFunction(callOp);
      assert(calledFunction && "could not retrieve called func::FuncOp");
      // If the called function does not have any tensors in its signature,
      // then it is not necessary to bufferize the callee before the caller.
      if (!hasTensorSignature(calledFunction))
        return WalkResult::skip();

      callerMap[calledFunction].insert(callOp);
      if (calledBy[calledFunction].insert(funcOp).second) {
        numberCallOpsContainedInFuncOp[funcOp]++;
      }
      return WalkResult::advance();
    });
  });
  if (res.wasInterrupted())
    return failure();
  // Iteratively remove function operations that do not call any of the
  // functions remaining in `numberCallOpsContainedInFuncOp` and append them to
  // `orderedFuncOps`.
  while (!numberCallOpsContainedInFuncOp.empty()) {
    auto it = llvm::find_if(numberCallOpsContainedInFuncOp,
                            [](auto entry) { return entry.getSecond() == 0; });
    if (it == numberCallOpsContainedInFuncOp.end())
      return moduleOp.emitOpError(
          "expected callgraph to be free of circular dependencies.");
    orderedFuncOps.push_back(it->getFirst());
    for (auto caller : calledBy[it->getFirst()])
      numberCallOpsContainedInFuncOp[caller]--;
    numberCallOpsContainedInFuncOp.erase(it);
  }
  return success();
}
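
// Example (illustrative): for the `@callee`/`@caller` module from the file
// header, `orderedFuncOps` is [@callee, @caller]: `@callee` contains no
// relevant calls, so it is analyzed and bufferized before its caller.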

/// Fold return values that are memref casts and update function return types.
///
/// During FuncOp bufferization, the exact type of the returned memrefs (if any)
/// is not known yet. Therefore, the bufferization uses memref types with the
/// most generic layout map as function return types. After bufferizing the
/// entire function body, a more precise memref type can potentially be used for
/// the return type of the function.
static void foldMemRefCasts(func::FuncOp funcOp) {
  if (funcOp.getBody().empty())
    return;

  func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
  SmallVector<Type> resultTypes;

  for (OpOperand &operand : returnOp->getOpOperands()) {
    if (auto castOp = operand.get().getDefiningOp<memref::CastOp>()) {
      operand.set(castOp.getSource());
      resultTypes.push_back(castOp.getSource().getType());
    } else {
      resultTypes.push_back(operand.get().getType());
    }
  }

  auto newFuncType = FunctionType::get(
      funcOp.getContext(), funcOp.getFunctionType().getInputs(), resultTypes);
  funcOp.setType(newFuncType);
}
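
// Example (illustrative): if a bufferized function body ends in
//   %c = memref.cast %m
//       : memref<4xf32> to memref<?xf32, strided<[?], offset: ?>>
//   return %c : memref<?xf32, strided<[?], offset: ?>>
// the cast is folded away and the function result type is tightened to
// memref<4xf32>.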

LogicalResult mlir::bufferization::analyzeModuleOp(
    ModuleOp moduleOp, OneShotAnalysisState &state,
    BufferizationStatistics *statistics) {
  assert(state.getOptions().bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");
  FuncAnalysisState &funcState = getOrCreateFuncAnalysisState(state);

  // A list of functions in the order in which they are analyzed + bufferized.
  SmallVector<func::FuncOp> orderedFuncOps;

  // A mapping of FuncOps to their callers.
  FuncCallerMap callerMap;

  if (failed(getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps, callerMap)))
    return failure();

  // Analyze ops.
  for (func::FuncOp funcOp : orderedFuncOps) {
    if (!state.getOptions().isOpAllowed(funcOp))
      continue;

    // Now analyzing function.
    funcState.startFunctionAnalysis(funcOp);

    // Gather equivalence info for CallOps.
    equivalenceAnalysis(funcOp, state, funcState);

    // Analyze funcOp.
    if (failed(analyzeOp(funcOp, state, statistics)))
      return failure();

    // Run some extra function analyses.
    if (failed(aliasingFuncOpBBArgsAnalysis(funcOp, state, funcState)) ||
        failed(funcOpBbArgReadWriteAnalysis(funcOp, state, funcState)))
      return failure();

    // Mark op as fully analyzed.
    funcState.analyzedFuncOps[funcOp] = FuncOpAnalysisState::Analyzed;
  }

  return success();
}

void mlir::bufferization::removeBufferizationAttributesInModule(
    ModuleOp moduleOp) {
  moduleOp.walk([&](func::FuncOp op) {
    for (BlockArgument bbArg : op.getArguments())
      removeBufferizationAttributes(bbArg);
  });
}

LogicalResult mlir::bufferization::bufferizeModuleOp(
    ModuleOp moduleOp, const OneShotBufferizationOptions &options,
    BufferizationStatistics *statistics) {
  assert(options.bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");
  IRRewriter rewriter(moduleOp.getContext());

  // A list of functions in the order in which they are analyzed + bufferized.
  SmallVector<func::FuncOp> orderedFuncOps;

  // A mapping of FuncOps to their callers.
  FuncCallerMap callerMap;

  if (failed(getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps, callerMap)))
    return failure();

  // Bufferize functions.
  for (func::FuncOp funcOp : orderedFuncOps) {
    // Note: It would be good to apply cleanups here but we cannot as aliasInfo
    // would be invalidated.

    if (llvm::is_contained(options.noAnalysisFuncFilter, funcOp.getSymName())) {
      // This function was not analyzed and RaW conflicts were not resolved.
      // Buffer copies must be inserted before every write.
      OneShotBufferizationOptions updatedOptions = options;
      updatedOptions.copyBeforeWrite = true;
      if (failed(bufferizeOp(funcOp, updatedOptions, statistics)))
        return failure();
    } else {
      if (failed(bufferizeOp(funcOp, options, statistics)))
        return failure();
    }

    // Change buffer return types to more precise layout maps.
    if (options.inferFunctionResultLayout)
      foldMemRefCasts(funcOp);
  }

  // Bufferize all other ops.
  for (Operation &op : llvm::make_early_inc_range(moduleOp.getOps())) {
    // Functions were already bufferized.
    if (isa<func::FuncOp>(&op))
      continue;
    if (failed(bufferizeOp(&op, options, statistics)))
      return failure();
  }

  // Post-pass cleanup of function argument attributes.
  removeBufferizationAttributesInModule(moduleOp);

  return success();
}

LogicalResult mlir::bufferization::runOneShotModuleBufferize(
    ModuleOp moduleOp, const OneShotBufferizationOptions &options,
    BufferizationStatistics *statistics) {
  assert(options.bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");
  assert(!(options.copyBeforeWrite && options.testAnalysisOnly) &&
         "invalid combination of bufferization flags");
  if (!options.copyBeforeWrite) {
    if (options.noAnalysisFuncFilter.empty()) {
      if (failed(insertTensorCopies(moduleOp, options, statistics)))
        return failure();
    } else {
      // FuncOps whose names are specified in options.noAnalysisFuncFilter will
      // not be analyzed. Ops in these FuncOps will not be analyzed either.
      OpFilter::Entry::FilterFn analysisFilterFn = [=](Operation *op) {
        auto func = dyn_cast<func::FuncOp>(op);
        if (!func)
          func = op->getParentOfType<func::FuncOp>();
        if (func)
          return llvm::is_contained(options.noAnalysisFuncFilter,
                                    func.getSymName());
        return false;
      };
      OneShotBufferizationOptions updatedOptions(options);
      updatedOptions.opFilter.denyOperation(analysisFilterFn);
      if (failed(insertTensorCopies(moduleOp, updatedOptions, statistics)))
        return failure();
    }
  }
  if (options.testAnalysisOnly)
    return success();
  if (failed(bufferizeModuleOp(moduleOp, options, statistics)))
    return failure();
  return success();
}
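
// Example usage (illustrative sketch; the surrounding pass boilerplate is
// assumed, e.g. inside a pass's runOnOperation()):
//
//   OneShotBufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   if (failed(runOneShotModuleBufferize(moduleOp, options)))
//     return signalPassFailure();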