FuncBufferizableOpInterfaceImpl.cpp
1//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"
10
11#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
12#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
13#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
14#include "mlir/Dialect/Func/IR/FuncOps.h"
15#include "mlir/Dialect/MemRef/IR/MemRef.h"
16#include "mlir/IR/Dialect.h"
17#include "mlir/IR/Operation.h"
18#include <optional>
19
20namespace mlir {
21/// Return all func.return ops in the given function.
22SmallVector<func::ReturnOp> bufferization::getReturnOps(func::FuncOp funcOp) {
23 SmallVector<func::ReturnOp> result;
24 for (Block &b : funcOp.getBody())
25 if (auto returnOp = dyn_cast<func::ReturnOp>(b.getTerminator()))
26 result.push_back(returnOp);
27 return result;
28}
29
30namespace bufferization {
31namespace func_ext {
32
33void FuncAnalysisState::startFunctionAnalysis(FuncOp funcOp) {
34 analyzedFuncOps[funcOp] = FuncOpAnalysisState::InProgress;
35 auto createdEquiv = equivalentFuncArgs.try_emplace(funcOp, IndexMapping());
36 auto createdAliasingResults =
37 aliasingReturnVals.try_emplace(funcOp, IndexToIndexListMapping());
38 auto createdRead = readBbArgs.try_emplace(funcOp, BbArgIndexSet());
39 auto createdWritten = writtenBbArgs.try_emplace(funcOp, BbArgIndexSet());
40 (void)createdEquiv;
41 (void)createdAliasingResults;
42 (void)createdRead;
43 (void)createdWritten;
44#ifndef NDEBUG
45 assert(createdEquiv.second && "equivalence info exists already");
46 assert(createdAliasingResults.second && "aliasing info exists already");
47 assert(createdRead.second && "bbarg access info exists already");
48 assert(createdWritten.second && "bbarg access info exists already");
49#endif // NDEBUG
50}
51
52// Note: this is a local adaptor to unify TensorType and TensorLikeType code
53// paths that both work with BufferizationOptions.
54static mlir::Attribute
55getDefaultMemorySpace(const BufferizationOptions &options,
56 TensorLikeType type) {
57 if (auto tensorType = dyn_cast<TensorType>(type)) {
58 return *options.defaultMemorySpaceFn(tensorType);
59 }
60 return nullptr;
61}
62
63/// Return the index-th bufferized function argument type. This assumes that the
64/// specified argument is a tensor. If the tensor is ranked, a layout map may be
65/// specified by the user (as per `options.functionArgTypeConverterFn`).
66static BufferLikeType
67getBufferizedFunctionArgType(FuncOp funcOp, int64_t index,
68 const BufferizationOptions &options) {
69 auto type =
70 dyn_cast<TensorLikeType>(funcOp.getFunctionType().getInput(index));
71 assert(type && "expected TensorLikeType");
72
73 // Note: For builtin tensors there is additional logic related to layout.
74 if (auto tensorType = dyn_cast<TensorType>(type)) {
75 BufferLikeType memrefType = options.functionArgTypeConverterFn(
76 type, *options.defaultMemorySpaceFn(tensorType), funcOp, options);
77
78 auto layoutAttr = funcOp.getArgAttrOfType<MemRefLayoutAttrInterface>(
79 index, BufferizationDialect::kBufferLayoutAttrName);
80 if (!layoutAttr)
81 return memrefType;
82
83 auto rankedMemrefType = dyn_cast<MemRefType>(memrefType);
84 assert(rankedMemrefType &&
85 "buffer layout not supported on unranked tensors");
86 return cast<BufferLikeType>(MemRefType::get(
87 rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
88 layoutAttr, rankedMemrefType.getMemorySpace()));
89 }
90
91 return options.functionArgTypeConverterFn(type, /*memSpace=*/nullptr, funcOp,
92 options);
93}
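// Editor's note (illustrative sketch, not part of the upstream file): with the
// default BufferizationOptions, a ranked tensor argument is lowered to a memref
// with a fully dynamic strided layout, unless a layout is pinned via the
// "bufferization.buffer_layout" argument attribute handled above. Roughly:
//
//   func.func @f(%arg0: tensor<4x8xf32>,
//                %arg1: tensor<4x8xf32> {bufferization.buffer_layout =
//                                         affine_map<(d0, d1) -> (d0, d1)>})
//
// would be bufferized to something like
//
//   func.func @f(%arg0: memref<4x8xf32, strided<[?, ?], offset: ?>>,
//                %arg1: memref<4x8xf32, affine_map<(d0, d1) -> (d0, d1)>>)
//
// The exact layouts depend on options.functionArgTypeConverterFn.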
94
95/// Return the FuncOp called by `callOp`.
96static FuncOp getCalledFunction(CallOpInterface callOp,
97 SymbolTableCollection &symbolTables) {
98 return dyn_cast_or_null<FuncOp>(callOp.resolveCallableInTable(&symbolTables));
99}
100
101/// Return the FuncOp called by `callOp`.
102static FuncOp getCalledFunction(CallOpInterface callOp,
103 const AnalysisState &state) {
104 auto &oneShotAnalysisState = static_cast<const OneShotAnalysisState &>(state);
105
106 if (auto *funcAnalysisState =
107 oneShotAnalysisState.getExtension<FuncAnalysisState>()) {
108 // Use the cached symbol tables.
109 return getCalledFunction(callOp, funcAnalysisState->symbolTables);
110 }
111
112 SymbolTableCollection symbolTables;
113 return getCalledFunction(callOp, symbolTables);
114}
115
116/// Get FuncAnalysisState.
117static const FuncAnalysisState &
118getFuncAnalysisState(const AnalysisState &state) {
119 assert(isa<OneShotAnalysisState>(state) && "expected OneShotAnalysisState");
120 auto *result = static_cast<const OneShotAnalysisState &>(state)
121 .getExtension<FuncAnalysisState>();
122 assert(result && "FuncAnalysisState does not exist");
123 return *result;
124}
125
126/// Return the state (phase) of analysis of the FuncOp.
127static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
128 FuncOp funcOp) {
129 if (!isa<OneShotAnalysisState>(state))
130 return FuncOpAnalysisState::NotAnalyzed;
131 auto *funcState = static_cast<const OneShotAnalysisState &>(state)
132 .getExtension<FuncAnalysisState>();
133 if (!funcState)
134 return FuncOpAnalysisState::NotAnalyzed;
135 const auto &analyzedFuncOps = funcState->analyzedFuncOps;
136 auto it = analyzedFuncOps.find(funcOp);
137 if (it == analyzedFuncOps.end())
138 return FuncOpAnalysisState::NotAnalyzed;
139 return it->second;
140}
141
142/// Return the index of the bbArg in the given FuncOp that is equivalent to the
143/// specified return value (if any).
144static std::optional<int64_t>
145getEquivalentFuncArgIdx(FuncOp funcOp, const FuncAnalysisState &state,
146 int64_t returnValIdx) {
147 auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
148 if (funcOpIt == state.equivalentFuncArgs.end())
149 // No equivalence info stored for funcOp.
150 return std::nullopt;
151
152 auto retValIt = funcOpIt->getSecond().find(returnValIdx);
153 if (retValIt == funcOpIt->getSecond().end())
154 // Return value has no equivalent bbArg.
155 return std::nullopt;
156
157 return retValIt->getSecond();
158}
159
160struct CallOpInterface
161 : public BufferizableOpInterface::ExternalModel<CallOpInterface,
162 func::CallOp> {
163 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
164 const AnalysisState &state) const {
165 func::CallOp callOp = cast<func::CallOp>(op);
166 FuncOp funcOp = getCalledFunction(callOp, state);
167 assert(funcOp && "expected CallOp to a FuncOp");
168
169 if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
170 // FuncOp not analyzed yet. Assume that OpOperand is read.
171 return true;
172
173 const FuncAnalysisState &funcState = getFuncAnalysisState(state);
174 return funcState.readBbArgs.lookup(funcOp).contains(
175 opOperand.getOperandNumber());
176 }
177
178 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
179 const AnalysisState &state) const {
180 func::CallOp callOp = cast<func::CallOp>(op);
181 FuncOp funcOp = getCalledFunction(callOp, state);
182 assert(funcOp && "expected CallOp to a FuncOp");
183
184 if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
185 // FuncOp not analyzed yet. Assume that OpOperand is written.
186 return true;
187
188 const FuncAnalysisState &funcState = getFuncAnalysisState(state);
189 return funcState.writtenBbArgs.lookup(funcOp).contains(
190 opOperand.getOperandNumber());
191 }
192
193 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
194 const AnalysisState &state) const {
195 func::CallOp callOp = cast<func::CallOp>(op);
196 FuncOp funcOp = getCalledFunction(callOp, state);
197 assert(funcOp && "expected CallOp to a FuncOp");
198 if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
199 // FuncOp not analyzed yet. Any OpResult may be aliasing.
200 return detail::unknownGetAliasingValues(opOperand);
201
202 // Get aliasing results from state.
203 const FuncAnalysisState &funcState = getFuncAnalysisState(state);
204 auto aliasingReturnVals =
205 funcState.aliasingReturnVals.lookup(funcOp).lookup(
206 opOperand.getOperandNumber());
207
208 // Check if the aliasing OpResult is equivalent to the OpOperand.
209 std::optional<int64_t> equivalent = {};
210 if (aliasingReturnVals.size() == 1) {
211 equivalent = getEquivalentFuncArgIdx(funcOp, funcState,
212 aliasingReturnVals.front());
213 assert((!equivalent.has_value() ||
214 *equivalent == opOperand.getOperandNumber()) &&
215 "inconsistent analysis state");
216 }
217 AliasingValueList result;
218 for (int64_t resultIdx : aliasingReturnVals)
219 result.addAlias({callOp->getOpResult(resultIdx),
220 equivalent.has_value() ? BufferRelation::Equivalent
221 : BufferRelation::Unknown,
222 /*isDefinite=*/equivalent.has_value()});
223 return result;
224 }
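// Editor's note (illustrative, assuming the callee has already been analyzed):
// for a callee that simply forwards its tensor argument,
//
//   func.func @callee(%t: tensor<?xf32>) -> tensor<?xf32> {
//     return %t : tensor<?xf32>
//   }
//
// the analysis records return value #0 as aliasing (and equivalent to)
// bbArg #0, so getAliasingValues above reports the call result as a definite,
// equivalent alias of the corresponding operand.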
225
226 FailureOr<BufferLikeType>
227 getBufferType(Operation *op, Value value, const BufferizationOptions &options,
228 const BufferizationState &state,
229 SmallVector<Value> &invocationStack) const {
230 auto callOp = cast<func::CallOp>(op);
231
232 // Reuse the cached symbol tables from the bufferization state.
233 FuncOp funcOp = getCalledFunction(callOp, state.getSymbolTables());
234 assert(funcOp && "expected CallOp to a FuncOp");
235
236 // If the callee was already bufferized, we can directly take the type from
237 // its signature.
238 FunctionType funcType = funcOp.getFunctionType();
239 Type resultType =
240 funcType.getResult(cast<OpResult>(value).getResultNumber());
241 if (auto bufferizedType = dyn_cast<BufferLikeType>(resultType))
242 return bufferizedType;
243
244 // Otherwise, call the type converter to compute the bufferized type.
245 auto tensorType = cast<TensorLikeType>(resultType);
246 return cast<BufferLikeType>(options.functionArgTypeConverterFn(
247 tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
248 options));
249 }
250
251 /// All function arguments are writable. It is the responsibility of the
252 /// CallOp to insert buffer copies where necessary.
253 LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
254 const BufferizationOptions &options,
255 BufferizationState &state) const {
256 func::CallOp callOp = cast<func::CallOp>(op);
257
258 // 1. Compute the result types of the new CallOp.
259 SmallVector<Type> resultTypes;
260 for (Value result : callOp.getResults()) {
261 Type returnType = result.getType();
262 if (!isa<TensorLikeType>(returnType)) {
263 // Non-tensor values are returned.
264 resultTypes.push_back(returnType);
265 continue;
266 }
267
268 // Returning a memref.
269 FailureOr<BufferLikeType> resultType =
270 bufferization::getBufferType(result, options, state);
271 if (failed(resultType))
272 return failure();
273 resultTypes.push_back(*resultType);
274 }
275
276 // 2. Rewrite tensor operands as memrefs, based on the types in the
277 // signature of the already bufferized callee.
278 SmallVector<Value> newOperands;
279
280 FuncOp funcOp = getCalledFunction(callOp, state.getSymbolTables());
281 assert(funcOp && "expected CallOp to a FuncOp");
282 FunctionType funcType = funcOp.getFunctionType();
283
284 for (OpOperand &opOperand : callOp->getOpOperands()) {
285 // Non-tensor operands are just copied.
286 if (!isa<TensorLikeType>(opOperand.get().getType())) {
287 newOperands.push_back(opOperand.get());
288 continue;
289 }
290
291 // Retrieve buffers for tensor operands.
292 FailureOr<Value> maybeBuffer =
293 getBuffer(rewriter, opOperand.get(), options, state);
294 if (failed(maybeBuffer))
295 return failure();
296 Value buffer = *maybeBuffer;
297
298 // Caller / callee type mismatch is handled with castOrReallocMemRefValue.
299 auto bufferType = funcType.getInput(opOperand.getOperandNumber());
300 if (!isa<BufferLikeType>(bufferType)) {
301 // The called function was not bufferized yet. This can happen when
302 // there are cycles in the function call graph. Compute the bufferized
303 // result type.
304 FailureOr<BufferLikeType> maybeBufferType =
305 bufferization::getBufferType(
306 funcOp.getArgument(opOperand.getOperandNumber()), options,
307 state);
308 if (failed(maybeBufferType))
309 return failure();
310 bufferType = *maybeBufferType;
311 }
312
313 // Since we don't yet have a clear layout story, to_buffer may
314 // conservatively turn tensors into a more dynamic memref than necessary.
315 // If the buffer type does not match what the callee expects, introduce an
316 // extra memref.cast that will either canonicalize away or fail compilation
317 // until we can do something better. Insert a reallocation + copy if it
318 // cannot be statically guaranteed that a direct cast would be valid.
319 if (buffer.getType() != bufferType) {
320 auto memrefDstType = dyn_cast<MemRefType>(bufferType);
321 assert(memrefDstType &&
322 "buffer layout not supported on unranked tensors");
323 FailureOr<Value> replacement = castOrReallocMemRefValue(
324 rewriter, buffer, memrefDstType, options);
325 if (failed(replacement))
326 return failure();
327 buffer = *replacement;
328 }
329 newOperands.push_back(buffer);
330 }
331
332 // 3. Create the new CallOp.
333 Operation *newCallOp =
334 func::CallOp::create(rewriter, callOp.getLoc(), funcOp.getSymName(),
335 resultTypes, newOperands);
336 newCallOp->setAttrs(callOp->getAttrs());
337
338 // 4. Replace the old op with the new op.
339 replaceOpWithBufferizedValues(rewriter, callOp, newCallOp->getResults());
340
341 return success();
342 }
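// Editor's sketch of the rewrite performed by bufferize() above (types and the
// to_buffer op shown are illustrative; the actual buffer types come from the
// bufferized callee signature):
//
//   %0 = call @callee(%t) : (tensor<?xf32>) -> tensor<?xf32>
//
// becomes, roughly,
//
//   %m = bufferization.to_buffer %t : tensor<?xf32>
//          to memref<?xf32, strided<[?], offset: ?>>
//   %r = call @callee(%m) : (memref<?xf32, strided<[?], offset: ?>>)
//          -> memref<?xf32, strided<[?], offset: ?>>
//
// with a memref.cast (or reallocation + copy) inserted before the call if the
// operand's buffer type does not match the callee signature, and the result
// wired back to remaining tensor users via replaceOpWithBufferizedValues.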
343};
344
345struct ReturnOpInterface
346 : public BufferizableOpInterface::ExternalModel<ReturnOpInterface,
347 func::ReturnOp> {
348 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
349 const AnalysisState &state) const {
350 return true;
351 }
352
353 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
354 const AnalysisState &state) const {
355 return false;
356 }
357
358 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
359 const AnalysisState &state) const {
360 return {};
361 }
362
363 LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
364 const BufferizationOptions &options,
365 BufferizationState &state) const {
366#ifndef NDEBUG
367 auto returnOp = cast<func::ReturnOp>(op);
368 assert(isa<FuncOp>(returnOp->getParentOp()) &&
369 "only support FuncOp parent for ReturnOp");
370#endif // NDEBUG
371
372 // ReturnOps are bufferized as part of FuncOps.
373 return success();
374 }
375};
376
377struct FuncOpInterface
378 : public OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel<
379 FuncOpInterface, FuncOp> {
380
381 static bool supportsUnstructuredControlFlow() { return true; }
382
383 bool hasTensorSemantics(Operation *op) const {
384 auto isaTensor = llvm::IsaPred<TensorLikeType>;
385
386 // A function has tensor semantics if it has tensor arguments/results.
387 auto funcOp = cast<FuncOp>(op);
388 bool hasTensorArg = any_of(funcOp.getArgumentTypes(), isaTensor);
389 bool hasTensorResult = any_of(funcOp.getResultTypes(), isaTensor);
390 if (hasTensorArg || hasTensorResult)
391 return true;
392
393 // It also has tensor semantics if it has tensor block arguments.
394 // TODO: Decouple bufferization of unstructured control flow from
395 // BufferizableOpInterface implementations. We should only care about
396 // region entry block arguments here (which are already covered by the
397 // argument types of the function).
398 for (Block &block : funcOp.getBody())
399 if (any_of(block.getArgumentTypes(), isaTensor))
400 return true;
401
402 return false;
403 }
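// Editor's note (illustrative): both functions below have tensor semantics and
// are therefore handled by this model; the second qualifies through its result
// type alone.
//
//   func.func @a(%t: tensor<4xf32>) -> index
//   func.func private @b() -> tensor<4xf32>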
404
405 AliasingOpOperandList
406 getAliasingOpOperands(Operation *op, Value value,
407 const AnalysisState &state) const {
408 return getAliasingBranchOpOperands(op, cast<BlockArgument>(value), state);
409 }
410
411 FailureOr<BufferLikeType>
412 getBufferType(Operation *op, Value value, const BufferizationOptions &options,
413 const BufferizationState &state,
414 SmallVector<Value> &invocationStack) const {
415 auto funcOp = cast<FuncOp>(op);
416 auto bbArg = cast<BlockArgument>(value);
417
418 // Function arguments are special.
419 if (bbArg.getOwner() == &funcOp.getBody().front())
420 return getBufferizedFunctionArgType(funcOp, bbArg.getArgNumber(),
421 options);
422
423 return OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel::
424 getBufferType(op, value, options, state, invocationStack);
425 }
426
427 /// Rewrite function bbArgs and return values into buffer form. This function
428 /// bufferizes the function signature and the ReturnOp. When the entire
429 /// function body has been bufferized, function return types can be switched
430 /// to more concise memref types as part of `foldMemRefCasts`.
431 ///
432 /// All function bbArgs are writable unless they are explicitly marked as
433 /// read-only. Callers must insert copies when needed.
434 LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
435 const BufferizationOptions &options,
436 BufferizationState &state) const {
437 auto funcOp = cast<FuncOp>(op);
438 FunctionType funcType = funcOp.getFunctionType();
439
440 // Compute the argument types.
441 SmallVector<Type> argTypes;
442 for (const auto &it : llvm::enumerate(funcType.getInputs())) {
443 Type argType = it.value();
444 if (isa<TensorLikeType>(argType)) {
445 argTypes.push_back(
446 getBufferizedFunctionArgType(funcOp, it.index(), options));
447 continue;
448 }
449 argTypes.push_back(argType);
450 }
451
452 // Compute the result types.
453 SmallVector<Type> retTypes;
454 for (Type resultType : funcType.getResults()) {
455 if (auto tensorType = dyn_cast<TensorLikeType>(resultType)) {
456 BufferLikeType resultType = options.functionArgTypeConverterFn(
457 tensorType, getDefaultMemorySpace(options, tensorType), funcOp,
458 options);
459 retTypes.push_back(resultType);
460 continue;
461 }
462 retTypes.push_back(resultType);
463 }
464
465 // Compute the new function type.
466 auto newFuncType = FunctionType::get(op->getContext(), argTypes, retTypes);
467
468 // If the function has no body, set the new function type and we are done.
469 if (funcOp.isExternal()) {
470 funcOp.setType(newFuncType);
471 return success();
472 }
473
474 // 1. Bufferize every block.
475 for (Block &block : funcOp.getBody())
476 if (failed(bufferization::bufferizeBlockSignature(&block, rewriter,
477 options, state)))
478 return failure();
479
480 // 2. Bufferize the operands of all return ops.
481 for (func::ReturnOp returnOp : getReturnOps(funcOp)) {
482 assert(returnOp->getNumOperands() == retTypes.size() &&
483 "incorrect number of return values");
484 SmallVector<Value> returnValues;
485 for (auto [returnVal, bufferizedType] :
486 llvm::zip_equal(returnOp->getOperands(), retTypes)) {
487 auto tensorType = dyn_cast<TensorLikeType>(returnVal.getType());
488 rewriter.setInsertionPoint(returnOp);
489
490 // If not a tensor type, just forward it.
491 if (!tensorType) {
492 returnValues.push_back(returnVal);
493 continue;
494 }
495
496 // Note: If `inferFunctionResultLayout = true`, casts are later folded
497 // away.
498 Value toBufferOp = bufferization::ToBufferOp::create(
499 rewriter, returnOp.getLoc(), bufferizedType, returnVal);
500 returnValues.push_back(toBufferOp);
501 }
502
503 returnOp.getOperandsMutable().assign(returnValues);
504 }
505
506 // 3. Set the new function type.
507 funcOp.setType(newFuncType);
508 return success();
509 }
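// Editor's sketch of the signature rewrite performed by bufferize() above
// (assuming default options, i.e. fully dynamic layouts at function
// boundaries; with inferFunctionResultLayout the casts fold away later):
//
//   func.func @fn(%arg0: tensor<8xf32>) -> tensor<8xf32> {
//     ...
//     return %t : tensor<8xf32>
//   }
//
// becomes, roughly,
//
//   func.func @fn(%arg0: memref<8xf32, strided<[?], offset: ?>>)
//       -> memref<8xf32, strided<[?], offset: ?>> {
//     ...
//     %m = bufferization.to_buffer %t : tensor<8xf32>
//            to memref<8xf32, strided<[?], offset: ?>>
//     return %m : memref<8xf32, strided<[?], offset: ?>>
//   }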
510
511 /// Return `true` if the given function argument is writable.
512 bool isWritable(Operation *op, Value value,
513 const AnalysisState &state) const {
514 auto funcOp = cast<FuncOp>(op);
515 BlockArgument bbArg = dyn_cast<BlockArgument>(value);
516 assert(bbArg && "expected BlockArgument");
517
518 // Non-entry block arguments are always writable. (They may alias with
519 // values that are not writable, which effectively makes them read-only.)
520 if (bbArg.getOwner() != &funcOp.getBody().front())
521 return true;
522
523 // "bufferization.writable" overrides other writability decisions. This is
524 // currently used for testing only.
525 if (BoolAttr writable = funcOp.getArgAttrOfType<BoolAttr>(
526 bbArg.getArgNumber(), BufferizationDialect::kWritableAttrName))
527 return writable.getValue();
528
529 // All function arguments are writable by default.
530 return true;
531 }
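// Editor's note (illustrative): writability can be overridden per argument for
// testing purposes via the attribute checked above, e.g.
//
//   func.func @fn(%arg0: tensor<8xf32> {bufferization.writable = false})
//
// in which case the analysis treats %arg0 as read-only and inserts a copy
// before any in-place write.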
532};
533
534} // namespace func_ext
535} // namespace bufferization
536} // namespace mlir
537
538void mlir::bufferization::func_ext::registerBufferizableOpInterfaceExternalModels(
539 DialectRegistry &registry) {
540 registry.addExtension(+[](MLIRContext *ctx, func::FuncDialect *dialect) {
541 func::CallOp::attachInterface<func_ext::CallOpInterface>(*ctx);
542 func::FuncOp::attachInterface<func_ext::FuncOpInterface>(*ctx);
543 func::ReturnOp::attachInterface<func_ext::ReturnOpInterface>(*ctx);
544 });
545}
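// Editor's sketch (not part of the upstream file) of how a client typically
// attaches these external models before running one-shot bufferization; the
// surrounding setup is illustrative:
//
//   DialectRegistry registry;
//   registry.insert<func::FuncDialect, bufferization::BufferizationDialect>();
//   bufferization::func_ext::registerBufferizableOpInterfaceExternalModels(
//       registry);
//   MLIRContext context(registry);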