MLIR 22.0.0git
FuncBufferizableOpInterfaceImpl.cpp
//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include <optional>
namespace mlir {
/// Return all func.return ops in the given function.
SmallVector<func::ReturnOp> bufferization::getReturnOps(func::FuncOp funcOp) {
  SmallVector<func::ReturnOp> result;
  for (Block &b : funcOp.getBody())
    if (auto returnOp = dyn_cast<func::ReturnOp>(b.getTerminator()))
      result.push_back(returnOp);
  return result;
}

namespace bufferization {
namespace func_ext {

void FuncAnalysisState::startFunctionAnalysis(FuncOp funcOp) {
  analyzedFuncOps[funcOp] = FuncOpAnalysisState::InProgress;
  auto createdEquiv = equivalentFuncArgs.try_emplace(funcOp, IndexMapping());
  auto createdAliasingResults =
      aliasingReturnVals.try_emplace(funcOp, IndexToIndexListMapping());
  auto createdRead = readBbArgs.try_emplace(funcOp, BbArgIndexSet());
  auto createdWritten = writtenBbArgs.try_emplace(funcOp, BbArgIndexSet());
  (void)createdEquiv;
  (void)createdAliasingResults;
  (void)createdRead;
  (void)createdWritten;
#ifndef NDEBUG
  assert(createdEquiv.second && "equivalence info exists already");
  assert(createdAliasingResults.second && "aliasing info exists already");
  assert(createdRead.second && "bbarg access info exists already");
  assert(createdWritten.second && "bbarg access info exists already");
#endif // NDEBUG
}

// Note: this is a local adaptor to unify TensorType and TensorLikeType code
// paths that both work with BufferizationOptions.
static mlir::Attribute
getDefaultMemorySpace(const BufferizationOptions &options,
                      TensorLikeType type) {
  if (auto tensorType = dyn_cast<TensorType>(type)) {
    return *options.defaultMemorySpaceFn(tensorType);
  }
  return nullptr;
}

/// Return the index-th bufferized function argument type. This assumes that the
/// specified argument is a tensor. If the tensor is ranked, a layout map may be
/// specified by the user (as per `options.functionArgTypeConverterFn`).
static BufferLikeType
getBufferizedFunctionArgType(FuncOp funcOp, int64_t index,
                             const BufferizationOptions &options) {
  auto type =
      dyn_cast<TensorLikeType>(funcOp.getFunctionType().getInput(index));
  assert(type && "expected TensorLikeType");

  // Note: For builtin tensors there is additional logic related to layout.
  if (auto tensorType = dyn_cast<TensorType>(type)) {
    BufferLikeType memrefType = options.functionArgTypeConverterFn(
        type, *options.defaultMemorySpaceFn(tensorType), funcOp, options);

    auto layoutAttr = funcOp.getArgAttrOfType<MemRefLayoutAttrInterface>(
        index, BufferizationDialect::kBufferLayoutAttrName);
    if (!layoutAttr)
      return memrefType;

    auto rankedMemrefType = dyn_cast<MemRefType>(memrefType);
    assert(rankedMemrefType &&
           "buffer layout not supported on unranked tensors");
    return cast<BufferLikeType>(MemRefType::get(
        rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
        layoutAttr, rankedMemrefType.getMemorySpace()));
  }

  return options.functionArgTypeConverterFn(type, /*memSpace=*/nullptr, funcOp,
                                            options);
}
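
// Illustrative sketch (editorial addition, not part of the upstream file):
// with the default function-boundary type converter, an entry-block argument
// such as
//
//   func.func @fn(%arg0: tensor<8xf32>
//                        {bufferization.buffer_layout = affine_map<(d0) -> (d0)>})
//
// would be bufferized to memref<8xf32, affine_map<(d0) -> (d0)>>, whereas the
// same argument without the attribute would receive the converter's default
// layout (typically a fully dynamic strided layout such as
// memref<8xf32, strided<[?], offset: ?>>).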

/// Return the FuncOp called by `callOp`.
static FuncOp getCalledFunction(CallOpInterface callOp,
                                SymbolTableCollection &symbolTables) {
  return dyn_cast_or_null<FuncOp>(callOp.resolveCallableInTable(&symbolTables));
}

/// Return the FuncOp called by `callOp`.
static FuncOp getCalledFunction(CallOpInterface callOp,
                                const AnalysisState &state) {
  auto &oneShotAnalysisState = static_cast<const OneShotAnalysisState &>(state);

  if (auto *funcAnalysisState =
          oneShotAnalysisState.getExtension<FuncAnalysisState>()) {
    // Use the cached symbol tables.
    return getCalledFunction(callOp, funcAnalysisState->symbolTables);
  }

  SymbolTableCollection symbolTables;
  return getCalledFunction(callOp, symbolTables);
}

/// Get FuncAnalysisState.
static const FuncAnalysisState &
getFuncAnalysisState(const AnalysisState &state) {
  assert(isa<OneShotAnalysisState>(state) && "expected OneShotAnalysisState");
  auto *result = static_cast<const OneShotAnalysisState &>(state)
                     .getExtension<FuncAnalysisState>();
  assert(result && "FuncAnalysisState does not exist");
  return *result;
}

/// Return the state (phase) of analysis of the FuncOp.
static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
                                                  FuncOp funcOp) {
  if (!isa<OneShotAnalysisState>(state))
    return FuncOpAnalysisState::NotAnalyzed;
  auto *funcState = static_cast<const OneShotAnalysisState &>(state)
                        .getExtension<FuncAnalysisState>();
  if (!funcState)
    return FuncOpAnalysisState::NotAnalyzed;
  const auto &analyzedFuncOps = funcState->analyzedFuncOps;
  auto it = analyzedFuncOps.find(funcOp);
  if (it == analyzedFuncOps.end())
    return FuncOpAnalysisState::NotAnalyzed;
  return it->second;
}

/// Return the index of the bbArg in the given FuncOp that is equivalent to the
/// specified return value (if any).
static std::optional<int64_t>
getEquivalentFuncArgIdx(FuncOp funcOp, const FuncAnalysisState &state,
                        int64_t returnValIdx) {
  auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
  if (funcOpIt == state.equivalentFuncArgs.end())
    // No equivalence info stored for funcOp.
    return std::nullopt;

  auto retValIt = funcOpIt->getSecond().find(returnValIdx);
  if (retValIt == funcOpIt->getSecond().end())
    // Return value has no equivalent bbArg.
    return std::nullopt;

  return retValIt->getSecond();
}

struct CallOpInterface
    : public BufferizableOpInterface::ExternalModel<CallOpInterface,
                                                    func::CallOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");

    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Assume that OpOperand is read.
      return true;

    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    return funcState.readBbArgs.lookup(funcOp).contains(
        opOperand.getOperandNumber());
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");

    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Assume that OpOperand is written.
      return true;

    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    return funcState.writtenBbArgs.lookup(funcOp).contains(
        opOperand.getOperandNumber());
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);
    FuncOp funcOp = getCalledFunction(callOp, state);
    assert(funcOp && "expected CallOp to a FuncOp");
    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
      // FuncOp not analyzed yet. Any OpResult may be aliasing.
      return detail::unknownGetAliasingValues(opOperand);

    // Get aliasing results from state.
    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
    auto aliasingReturnVals =
        funcState.aliasingReturnVals.lookup(funcOp).lookup(
            opOperand.getOperandNumber());

    // Check if the aliasing OpResult is equivalent to the OpOperand.
    std::optional<int64_t> equivalent = {};
    if (aliasingReturnVals.size() == 1) {
      equivalent = getEquivalentFuncArgIdx(funcOp, funcState,
                                           aliasingReturnVals.front());
      assert((!equivalent.has_value() ||
              *equivalent == opOperand.getOperandNumber()) &&
             "inconsistent analysis state");
    }
    AliasingValueList result;
    for (int64_t resultIdx : aliasingReturnVals)
      result.addAlias({callOp->getOpResult(resultIdx),
                       equivalent.has_value() ? BufferRelation::Equivalent
                                              : BufferRelation::Unknown,
                       /*isDefinite=*/equivalent.has_value()});
    return result;
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value, const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto callOp = cast<func::CallOp>(op);

    // TODO: Avoid recomputing the symbol tables every time.
    SymbolTableCollection symbolTable;

    FuncOp funcOp = getCalledFunction(callOp, symbolTable);
    assert(funcOp && "expected CallOp to a FuncOp");

    // If the callee was already bufferized, we can directly take the type from
    // its signature.
    FunctionType funcType = funcOp.getFunctionType();
    Type resultType =
        funcType.getResult(cast<OpResult>(value).getResultNumber());
    if (auto bufferizedType = dyn_cast<BufferLikeType>(resultType))
      return bufferizedType;

    // Otherwise, call the type converter to compute the bufferized type.
    auto tensorType = cast<TensorLikeType>(resultType);
    return cast<BufferLikeType>(options.functionArgTypeConverterFn(
        tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
        options));
  }

  /// All function arguments are writable. It is the responsibility of the
  /// CallOp to insert buffer copies where necessary.
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    func::CallOp callOp = cast<func::CallOp>(op);

    // 1. Compute the result types of the new CallOp.
    SmallVector<Type> resultTypes;
    for (Value result : callOp.getResults()) {
      Type returnType = result.getType();
      if (!isa<TensorLikeType>(returnType)) {
        // Non-tensor values are returned as-is.
        resultTypes.push_back(returnType);
        continue;
      }

      // Returning a memref.
      FailureOr<BufferLikeType> resultType =
          bufferization::getBufferType(result, options, state);
      if (failed(resultType))
        return failure();
      resultTypes.push_back(*resultType);
    }

    // 2. Rewrite tensor operands as memrefs based on the type of the already
    // bufferized callee.
    SmallVector<Value> newOperands;

    FuncOp funcOp = getCalledFunction(callOp, state.getSymbolTables());
    assert(funcOp && "expected CallOp to a FuncOp");
    FunctionType funcType = funcOp.getFunctionType();

    for (OpOperand &opOperand : callOp->getOpOperands()) {
      // Non-tensor operands are just copied.
      if (!isa<TensorLikeType>(opOperand.get().getType())) {
        newOperands.push_back(opOperand.get());
        continue;
      }

      // Retrieve buffers for tensor operands.
      FailureOr<Value> maybeBuffer =
          getBuffer(rewriter, opOperand.get(), options, state);
      if (failed(maybeBuffer))
        return failure();
      Value buffer = *maybeBuffer;

      // Caller / callee type mismatch is handled with castOrReallocMemRefValue.
      auto bufferType = funcType.getInput(opOperand.getOperandNumber());
      if (!isa<BufferLikeType>(bufferType)) {
        // The called function was not bufferized yet. This can happen when
        // there are cycles in the function call graph. Compute the bufferized
        // result type.
        FailureOr<BufferLikeType> maybeBufferType =
            bufferization::getBufferType(
                funcOp.getArgument(opOperand.getOperandNumber()), options,
                state);
        if (failed(maybeBufferType))
          return failure();
        bufferType = *maybeBufferType;
      }

      // Since we don't yet have a clear layout story, to_buffer may
      // conservatively turn tensors into more dynamic memrefs than necessary.
      // If the memref type expected by the callee does not match, introduce an
      // extra memref.cast that will either canonicalize away or fail
      // compilation until we can do something better. Insert a reallocation +
      // copy if it cannot be statically guaranteed that a direct cast would be
      // valid.
      if (buffer.getType() != bufferType) {
        auto memrefDstType = dyn_cast<MemRefType>(bufferType);
        assert(memrefDstType &&
               "buffer layout not supported on unranked tensors");
        FailureOr<Value> replacement = castOrReallocMemRefValue(
            rewriter, buffer, memrefDstType, options);
        if (failed(replacement))
          return failure();
        buffer = *replacement;
      }
      newOperands.push_back(buffer);
    }

    // 3. Create the new CallOp.
    Operation *newCallOp =
        func::CallOp::create(rewriter, callOp.getLoc(), funcOp.getSymName(),
                             resultTypes, newOperands);
    newCallOp->setAttrs(callOp->getAttrs());

    // 4. Replace the old op with the new op.
    replaceOpWithBufferizedValues(rewriter, callOp, newCallOp->getResults());

    return success();
  }
};
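
// Illustrative sketch (editorial addition, not part of the upstream file) of
// the rewrite performed by CallOpInterface::bufferize, assuming the callee has
// already been bufferized to operate on memref<?xf32, strided<[?], offset: ?>>:
//
//   %r = func.call @callee(%t) : (tensor<?xf32>) -> tensor<?xf32>
//
// becomes, roughly,
//
//   %m = bufferization.to_buffer %t
//       : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>>
//   %rm = func.call @callee(%m)
//       : (memref<?xf32, strided<[?], offset: ?>>)
//         -> memref<?xf32, strided<[?], offset: ?>>
//
// with the original result %r replaced by the bufferized result %rm (wrapped
// in a bufferization.to_tensor for any remaining tensor uses).
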
struct ReturnOpInterface
    : public BufferizableOpInterface::ExternalModel<ReturnOpInterface,
                                                    func::ReturnOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
#ifndef NDEBUG
    auto returnOp = cast<func::ReturnOp>(op);
    assert(isa<FuncOp>(returnOp->getParentOp()) &&
           "only support FuncOp parent for ReturnOp");
#endif // NDEBUG

    // ReturnOps are bufferized as part of FuncOps.
    return success();
  }
};

struct FuncOpInterface
    : public OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel<
          FuncOpInterface, FuncOp> {

  static bool supportsUnstructuredControlFlow() { return true; }

  bool hasTensorSemantics(Operation *op) const {
    auto isaTensor = llvm::IsaPred<TensorLikeType>;

    // A function has tensor semantics if it has tensor arguments/results.
    auto funcOp = cast<FuncOp>(op);
    bool hasTensorArg = any_of(funcOp.getArgumentTypes(), isaTensor);
    bool hasTensorResult = any_of(funcOp.getResultTypes(), isaTensor);
    if (hasTensorArg || hasTensorResult)
      return true;

    // It also has tensor semantics if it has tensor block arguments.
    // TODO: Decouple bufferization of unstructured control flow from
    // BufferizableOpInterface implementations. We should only care about
    // region entry block arguments here (which are already covered by the
    // argument types of the function).
    for (Block &block : funcOp.getBody())
      if (any_of(block.getArgumentTypes(), isaTensor))
        return true;

    return false;
  }

  AliasingOpOperandList
  getAliasingOpOperands(Operation *op, Value value,
                        const AnalysisState &state) const {
    return getAliasingBranchOpOperands(op, cast<BlockArgument>(value), state);
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value, const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto funcOp = cast<FuncOp>(op);
    auto bbArg = cast<BlockArgument>(value);

    // Function arguments are special.
    if (bbArg.getOwner() == &funcOp.getBody().front())
      return getBufferizedFunctionArgType(funcOp, bbArg.getArgNumber(),
                                          options);

    return OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel::
        getBufferType(op, value, options, state, invocationStack);
  }

  /// Rewrite function bbArgs and return values into buffer form. This function
  /// bufferizes the function signature and the ReturnOp. When the entire
  /// function body has been bufferized, function return types can be switched
  /// to more concise memref types as part of `foldMemRefCasts`.
  ///
  /// All function bbArgs are writable unless they are explicitly marked as
  /// read-only. Callers must insert copies when needed.
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto funcOp = cast<FuncOp>(op);
    FunctionType funcType = funcOp.getFunctionType();

    // Compute the argument types.
    SmallVector<Type> argTypes;
    for (const auto &it : llvm::enumerate(funcType.getInputs())) {
      Type argType = it.value();
      if (isa<TensorLikeType>(argType)) {
        argTypes.push_back(
            getBufferizedFunctionArgType(funcOp, it.index(), options));
        continue;
      }
      argTypes.push_back(argType);
    }

    // Compute the result types.
    SmallVector<Type> retTypes;
    for (Type resultType : funcType.getResults()) {
      if (auto tensorType = dyn_cast<TensorLikeType>(resultType)) {
        BufferLikeType bufferizedResultType =
            options.functionArgTypeConverterFn(
                tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
                options);
        retTypes.push_back(bufferizedResultType);
        continue;
      }
      retTypes.push_back(resultType);
    }

    // Compute the new function type.
    auto newFuncType = FunctionType::get(op->getContext(), argTypes, retTypes);

    // If the function has no body, set the new function type and we are done.
    if (funcOp.isExternal()) {
      funcOp.setType(newFuncType);
      return success();
    }

    // 1. Bufferize every block.
    for (Block &block : funcOp.getBody())
      if (failed(bufferization::bufferizeBlockSignature(&block, rewriter,
                                                        options, state)))
        return failure();

    // 2. Bufferize the operands of all return ops.
    for (func::ReturnOp returnOp : getReturnOps(funcOp)) {
      assert(returnOp->getNumOperands() == retTypes.size() &&
             "incorrect number of return values");
      SmallVector<Value> returnValues;
      for (auto [returnVal, bufferizedType] :
           llvm::zip_equal(returnOp->getOperands(), retTypes)) {
        auto tensorType = dyn_cast<TensorLikeType>(returnVal.getType());
        rewriter.setInsertionPoint(returnOp);

        // If not a tensor type, just forward it.
        if (!tensorType) {
          returnValues.push_back(returnVal);
          continue;
        }

        // Note: If `inferFunctionResultLayout = true`, casts are later folded
        // away.
        Value toBufferOp = bufferization::ToBufferOp::create(
            rewriter, returnOp.getLoc(), bufferizedType, returnVal);
        returnValues.push_back(toBufferOp);
      }

      returnOp.getOperandsMutable().assign(returnValues);
    }

    // 3. Set the new function type.
    funcOp.setType(newFuncType);
    return success();
  }
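
  // Illustrative sketch (editorial addition, not part of the upstream file):
  // with the default function-boundary type converter (fully dynamic layout),
  //
  //   func.func @fn(%arg0: tensor<?xf32>) -> tensor<?xf32> {
  //     func.return %arg0 : tensor<?xf32>
  //   }
  //
  // is bufferized to, roughly,
  //
  //   func.func @fn(%arg0: memref<?xf32, strided<[?], offset: ?>>)
  //       -> memref<?xf32, strided<[?], offset: ?>> {
  //     func.return %arg0 : memref<?xf32, strided<[?], offset: ?>>
  //   }
  //
  // with bufferization.to_buffer / to_tensor pairs inserted while bufferizing;
  // they fold away (and result layouts may be tightened) when
  // `inferFunctionResultLayout` is enabled.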

  /// Return `true` if the given function argument is writable.
  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    auto funcOp = cast<FuncOp>(op);
    BlockArgument bbArg = dyn_cast<BlockArgument>(value);
    assert(bbArg && "expected BlockArgument");

    // Non-entry block arguments are always writable. (They may alias with
    // values that are not writable, which effectively makes them read-only.)
    if (bbArg.getOwner() != &funcOp.getBody().front())
      return true;

    // "bufferization.writable" overrides other writability decisions. This is
    // currently used for testing only.
    if (BoolAttr writable = funcOp.getArgAttrOfType<BoolAttr>(
            bbArg.getArgNumber(), BufferizationDialect::kWritableAttrName))
      return writable.getValue();

    // All function arguments are writable by default.
    return true;
  }
};
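
// Illustrative sketch (editorial addition): marking an argument as read-only
// via the "bufferization.writable" attribute checked in isWritable above:
//
//   func.func @fn(%arg0: tensor<8xf32> {bufferization.writable = false}) {
//     ...
//   }
//
// The analysis then treats %arg0 as not writable, forcing a copy before any
// in-place write into it.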

} // namespace func_ext
} // namespace bufferization
} // namespace mlir

void mlir::bufferization::func_ext::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, func::FuncDialect *dialect) {
    func::CallOp::attachInterface<func_ext::CallOpInterface>(*ctx);
    func::FuncOp::attachInterface<func_ext::FuncOpInterface>(*ctx);
    func::ReturnOp::attachInterface<func_ext::ReturnOpInterface>(*ctx);
  });
}
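
A minimal usage sketch (editorial addition): clients typically call
registerBufferizableOpInterfaceExternalModels on their DialectRegistry before
loading IR, so that One-Shot Bufferize can bufferize across func.func /
func.call boundaries. The helper function below is hypothetical; only
registerBufferizableOpInterfaceExternalModels is defined in this file.

    #include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/DialectRegistry.h"
    #include "mlir/IR/MLIRContext.h"

    // Hypothetical setup helper: register the func dialect and the external
    // bufferization models on a context.
    static void setupContext(mlir::MLIRContext &context) {
      mlir::DialectRegistry registry;
      registry.insert<mlir::func::FuncDialect>();
      mlir::bufferization::func_ext::
          registerBufferizableOpInterfaceExternalModels(registry);
      context.appendDialectRegistry(registry);
    }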