MLIR 23.0.0git
OneShotModuleBufferize.cpp
Go to the documentation of this file.
//===- OneShotModuleBufferize.cpp - Bufferization across Func. Boundaries ===//
3//
4// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5// See https://llvm.org/LICENSE.txt for license information.
6// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7//
8//===----------------------------------------------------------------------===//
9//
10// Module Bufferization is an extension of One-Shot Bufferize that
11// bufferizes function boundaries. It provides `BufferizableOpInterface`
12// implementations for FuncOp, CallOp and ReturnOp. Although it is named
13// Module Bufferization, it may operate on any SymbolTable.
14//
15// Module Bufferization is run via `runOneShotModuleBufferize(SymbolTableOp,
16// ...)`. This function analyzes the given op and determines the order of
17// analysis and bufferization: Functions that are called are processed before
18// their respective callers.
19//
20// After analyzing a FuncOp, additional information about its bbArgs is
21// gathered and stored in `FuncAnalysisState`.
22//
23// * `aliasingFuncOpBBArgsAnalysis` determines the equivalent/aliasing bbArgs
24// for
25// each tensor return value (if any).
26// * `funcOpBbArgReadWriteAnalysis` determines whether or not a tensor bbArg is
27// read/written.
28//
29// Module Bufferization implements the following calling convention.
30//
31// * In the absence of conflicts within a FuncOp, the FuncOp's bbArgs may always
32// be written to in-place.
33// * If a tensor operand of a CallOp is read after the CallOp, the operand of
34// the CallOp must bufferize out-of-place.
35//
36// Example: The tensor.insert op bufferizes in-place because it is allowed to
37// modify the buffer of `%t1` directly. The CallOp in `caller` must bufferize
38// out-of-place because `%t0` is modified by the callee but read by the
39// tensor.extract op. The analysis of CallOps decides whether an OpOperand must
40// bufferize out-of-place based on results of `funcOpBbArgReadWriteAnalysis`.
41// ```
42// func @callee(%t1 : tensor<?xf32>) -> tensor<?xf32> {
43// %f = ... : f32
44// %0 = tensor.insert %f into %t1[...] : tensor<?xf32>
45// return %0 : tensor<?xf32>
46// }
47//
48// func @caller() -> () {
49// %t0 = ... : tensor<?xf32>
50// %1 = call @callee(%t0) : (tensor<?xf32>) -> (tensor<?xf32>)
51// %2 = tensor.extract %1[...] : tensor<?xf32>
52// }
53// ```
54//
55// Note: If a function is external, `funcOpBbArgReadWriteAnalysis` cannot
56// analyze the function body. In such a case, the CallOp analysis conservatively
57// assumes that each tensor OpOperand is both read and written.
58//
59// TODO: Add FuncOp attributes so that bbArgs of external FuncOps can be marked
60// as "not reading" and/or "not writing".
61
72#include "mlir/IR/Operation.h"
73#include "llvm/ADT/MapVector.h"
74#include "llvm/ADT/SmallVectorExtras.h"
75
76using namespace mlir;
77using namespace mlir::bufferization;
78using namespace mlir::bufferization::func_ext;
79
80/// A mapping of FuncOps to their callers.
82
83/// Get or create FuncAnalysisState.
84static FuncAnalysisState &
91
92namespace {
93
94/// Annotate IR with the results of the analysis. For testing purposes only.
95static void annotateEquivalentReturnBbArg(OpOperand &returnVal,
96 BlockArgument bbArg) {
97 const char *kEquivalentArgsAttr = "__equivalent_func_args__";
98 Operation *op = returnVal.getOwner();
99
100 SmallVector<int64_t> equivBbArgs;
101 if (op->hasAttr(kEquivalentArgsAttr)) {
102 auto attr = cast<ArrayAttr>(op->getAttr(kEquivalentArgsAttr));
103 equivBbArgs = llvm::map_to_vector<4>(attr, [](Attribute a) {
104 return cast<IntegerAttr>(a).getValue().getSExtValue();
105 });
106 } else {
107 equivBbArgs.append(op->getNumOperands(), -1);
108 }
109 equivBbArgs[returnVal.getOperandNumber()] = bbArg.getArgNumber();
110
111 OpBuilder b(op->getContext());
112 op->setAttr(kEquivalentArgsAttr, b.getI64ArrayAttr(equivBbArgs));
113}
114
/// Store function BlockArguments that are equivalent to/aliasing a returned
/// value in FuncAnalysisState.
///
/// Populates `funcState.aliasingReturnVals` (bbArg index -> return operand
/// indices that may alias it, merged over all func.return ops) and
/// `funcState.equivalentFuncArgs` (return operand index -> bbArg index, only
/// when the same bbArg is equivalent across ALL func.return ops).
static LogicalResult
aliasingFuncOpBBArgsAnalysis(FuncOp funcOp, OneShotAnalysisState &state,
                             FuncAnalysisState &funcState) {
  if (funcOp.getBody().empty()) {
    // No function body available. Conservatively assume that every tensor
    // return value may alias with any tensor bbArg.
    FunctionType type = funcOp.getFunctionType();
    for (const auto &inputIt : llvm::enumerate(type.getInputs())) {
      if (!isa<TensorType>(inputIt.value()))
        continue;
      for (const auto &resultIt : llvm::enumerate(type.getResults())) {
        if (!isa<TensorType>(resultIt.value()))
          continue;
        int64_t returnIdx = resultIt.index();
        int64_t bbArgIdx = inputIt.index();
        funcState.aliasingReturnVals[funcOp][bbArgIdx].push_back(returnIdx);
      }
    }
    return success();
  }

  // Find all func.return ops.
  SmallVector<func::ReturnOp> returnOps = getReturnOps(funcOp);
  // TODO: throw error when there is any non-func.return op that has the
  // ReturnLike trait
  if (returnOps.empty()) {
    return funcOp.emitError("cannot bufferize func.func without func.return");
  }

  // Build alias sets. Merge all aliases from all func.return ops.
  // NOTE(review): this branch only considers RankedTensorType bbArgs/operands
  // while the bodiless branch above matches any TensorType — presumably
  // intentional (unranked tensors are handled conservatively); confirm.
  for (BlockArgument bbArg : funcOp.getArguments()) {
    if (isa<RankedTensorType>(bbArg.getType())) {
      int64_t bbArgIdx = bbArg.getArgNumber();
      // Store aliases in a set, so that we don't add the same alias twice.
      SetVector<int64_t> aliases;
      for (func::ReturnOp returnOp : returnOps) {
        for (OpOperand &returnVal : returnOp->getOpOperands()) {
          if (isa<RankedTensorType>(returnVal.get().getType())) {
            int64_t returnIdx = returnVal.getOperandNumber();
            if (state.areAliasingBufferizedValues(returnVal.get(), bbArg))
              aliases.insert(returnIdx);
          }
        }
      }
      for (int64_t alias : aliases)
        funcState.aliasingReturnVals[funcOp][bbArgIdx].push_back(alias);
    }
  }

  // Build equivalence sets.
  // Helper function that finds an equivalent block argument index for the
  // given OpOperand. Return std::nullopt if no equivalent block argument could
  // be found.
  auto findEquivalentBlockArgIdx =
      [&](OpOperand &opOperand) -> std::optional<int64_t> {
    Value v = opOperand.get();
    if (!isa<TensorType>(v.getType()))
      return std::nullopt;
    for (BlockArgument bbArg : funcOp.getArguments()) {
      if (isa<RankedTensorType>(bbArg.getType())) {
        if (state.areEquivalentBufferizedValues(v, bbArg)) {
          // In test mode, leave a trace of the result in the IR.
          if (state.getOptions().testAnalysisOnly)
            annotateEquivalentReturnBbArg(opOperand, bbArg);
          return bbArg.getArgNumber();
        }
      }
    }
    return std::nullopt;
  };

  int64_t numResults = returnOps.front()->getNumOperands();
  for (int64_t i = 0; i < numResults; ++i) {
    // Find the equivalent block argument index for the i-th operand of the
    // first func.return op.
    std::optional<int64_t> maybeEquiv =
        findEquivalentBlockArgIdx(returnOps.front()->getOpOperand(i));
    if (!maybeEquiv.has_value())
      continue;
    int64_t bbArgIdx = *maybeEquiv;
    bool allEquiv = true;

    // Check if all other func.return ops have the same equivalent block
    // argument for the i-th operand. In contrast to aliasing information,
    // which is just "merged", equivalence information must match across all
    // func.return ops.
    for (func::ReturnOp returnOp : ArrayRef(returnOps).drop_front()) {
      std::optional<int64_t> maybeEquiv =
          findEquivalentBlockArgIdx(returnOp->getOpOperand(i));
      if (maybeEquiv != bbArgIdx) {
        allEquiv = false;
        break;
      }
    }

    // All func.return ops have the same equivalent block argument for the i-th
    // operand.
    if (allEquiv)
      funcState.equivalentFuncArgs[funcOp][i] = bbArgIdx;
  }

  return success();
}
219
220static void annotateFuncArgAccess(func::FuncOp funcOp, int64_t idx, bool isRead,
221 bool isWritten) {
222 OpBuilder b(funcOp.getContext());
223 Attribute accessType;
224 if (isRead && isWritten) {
225 accessType = b.getStringAttr("read-write");
226 } else if (isRead) {
227 accessType = b.getStringAttr("read");
228 } else if (isWritten) {
229 accessType = b.getStringAttr("write");
230 } else {
231 accessType = b.getStringAttr("none");
232 }
233 funcOp.setArgAttr(idx, BufferizationDialect::kBufferAccessAttrName,
234 accessType);
235}
236
237/// Determine which FuncOp bbArgs are read and which are written. When run on a
238/// function with unknown ops, we conservatively assume that such ops bufferize
239/// to a read + write.
240static LogicalResult
241funcOpBbArgReadWriteAnalysis(FuncOp funcOp, OneShotAnalysisState &state,
242 FuncAnalysisState &funcState) {
243 for (int64_t idx = 0, e = funcOp.getFunctionType().getNumInputs(); idx < e;
244 ++idx) {
245 // Skip non-tensor arguments.
246 if (!isa<TensorType>(funcOp.getFunctionType().getInput(idx)))
247 continue;
248 bool isRead;
249 bool isWritten;
250 if (auto accessAttr = funcOp.getArgAttrOfType<StringAttr>(
251 idx, BufferizationDialect::kBufferAccessAttrName)) {
252 // Buffer access behavior is specified on the function. Skip the analysis.
253 StringRef str = accessAttr.getValue();
254 isRead = str == "read" || str == "read-write";
255 isWritten = str == "write" || str == "read-write";
256 } else if (funcOp.getBody().empty()) {
257 // If the function has no body, conservatively assume that all args are
258 // read + written.
259 isRead = true;
260 isWritten = true;
261 } else {
262 // Analyze the body of the function.
263 BlockArgument bbArg = funcOp.getArgument(idx);
264 isRead = state.isValueRead(bbArg);
265 isWritten = state.isValueWritten(bbArg);
266 }
267
268 if (state.getOptions().testAnalysisOnly)
269 annotateFuncArgAccess(funcOp, idx, isRead, isWritten);
270 if (isRead)
271 funcState.readBbArgs[funcOp].insert(idx);
272 if (isWritten)
273 funcState.writtenBbArgs[funcOp].insert(idx);
274 }
275
276 return success();
277}
278} // namespace
279
/// Remove bufferization attributes on FuncOp arguments.
// NOTE(review): the signature line (`static void
// removeBufferizationAttributes(BlockArgument bbArg) {`, per the forward
// reference below) is missing from this extract.
  // Find the function that owns the block argument.
  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
  // Drop the layout and writability hints; they are only meaningful before
  // and during bufferization.
  funcOp.removeArgAttr(bbArg.getArgNumber(),
                       BufferizationDialect::kBufferLayoutAttrName);
  funcOp.removeArgAttr(bbArg.getArgNumber(),
                       BufferizationDialect::kWritableAttrName);
}
288
289/// Return the func::FuncOp called by `callOp`.
290static func::FuncOp
291getCalledFunction(func::CallOp callOp,
292 mlir::SymbolTableCollection &symbolTable) {
293 return dyn_cast_or_null<func::FuncOp>(
294 callOp.resolveCallableInTable(&symbolTable));
295}
296
297/// Return "true" if the given function signature has tensor semantics.
298static bool hasTensorSignature(func::FuncOp funcOp) {
299 return llvm::any_of(funcOp.getFunctionType().getInputs(),
300 llvm::IsaPred<TensorType>) ||
301 llvm::any_of(funcOp.getFunctionType().getResults(),
302 llvm::IsaPred<TensorType>);
303}
304
/// Store all functions of the `moduleOp` in `orderedFuncOps`, sorted by
/// callee-caller order (i.e., callees without callers first). Store all
/// remaining functions (i.e., the ones that call each other recursively) in
/// `remainingFuncOps`. Does not traverse nested symbol tables.
///
/// Store the map of FuncOp to all its callers in `callerMap`.
///
/// Return `failure()` if we are unable to retrieve the called FuncOp from
/// any func::CallOp.
static LogicalResult getFuncOpsOrderedByCalls(
    Operation *moduleOp, SmallVectorImpl<func::FuncOp> &orderedFuncOps,
    SmallVectorImpl<func::FuncOp> &remainingFuncOps, FuncCallerMap &callerMap,
    SymbolTableCollection &symbolTables) {
  // For each FuncOp, the set of functions called by it (i.e. the union of
  // symbols of all nested func::CallOp).
  // NOTE(review): the declaration of `calledBy` is missing from this extract;
  // its `insert` is used in a boolean context below, so it presumably maps to
  // a set type whose insert reports newness — confirm against the full file.
  // For each FuncOp, the number of func::CallOp it contains.
  llvm::MapVector<func::FuncOp, unsigned> numberCallOpsContainedInFuncOp;
  for (mlir::Region &region : moduleOp->getRegions()) {
    for (mlir::Block &block : region.getBlocks()) {
      for (func::FuncOp funcOp : block.getOps<func::FuncOp>()) {
        // Collect function calls and populate the caller map.
        numberCallOpsContainedInFuncOp[funcOp] = 0;
        WalkResult res = funcOp.walk([&](func::CallOp callOp) -> WalkResult {
          func::FuncOp calledFunction = getCalledFunction(callOp, symbolTables);
          assert(calledFunction && "could not retrieved called func::FuncOp");
          // If the called function does not have any tensors in its signature,
          // then it is not necessary to bufferize the callee before the caller.
          if (!hasTensorSignature(calledFunction))
            return WalkResult::skip();

          callerMap[calledFunction].insert(callOp);
          // Count each distinct callee only once per caller.
          if (calledBy[calledFunction].insert(funcOp)) {
            numberCallOpsContainedInFuncOp[funcOp]++;
          }
          return WalkResult::advance();
        });
        if (res.wasInterrupted())
          return failure();
      }
    }
  }

  // Iteratively remove function operations that do not call any of the
  // functions remaining in the callCounter map and add them to ordered list.
  // This is a Kahn-style topological sort over the call graph.
  // NOTE(review): the declaration of `worklist` is missing from this extract.
  for (const auto &entry : numberCallOpsContainedInFuncOp) {
    if (entry.second == 0)
      worklist.push_back(entry.first);
  }

  while (!worklist.empty()) {
    func::FuncOp func = worklist.pop_back_val();
    orderedFuncOps.push_back(func);

    // Each caller of `func` has one fewer unprocessed callee; once a caller
    // reaches zero, it becomes ready itself.
    for (func::FuncOp caller : calledBy[func]) {
      auto &count = numberCallOpsContainedInFuncOp[caller];

      if (--count == 0)
        worklist.push_back(caller);
    }

    numberCallOpsContainedInFuncOp.erase(func);
  }

  // Put all other functions in the list of remaining functions. These are
  // functions that call each other circularly.
  for (auto it : numberCallOpsContainedInFuncOp)
    remainingFuncOps.push_back(it.first);

  return success();
}
378
/// Helper function that extracts the source from a memref.cast. If the given
/// value is not a memref.cast result, simply returns the given value.
// NOTE(review): the signature line (`static Value unpackCast(Value v) {`, per
// the forward reference below) is missing from this extract.
  auto castOp = v.getDefiningOp<memref::CastOp>();
  if (!castOp)
    return v;
  return castOp.getSource();
}
387
/// Helper function that returns the return types (skipping casts) of the given
/// func.return ops. This function returns as many types as the return ops have
/// operands. If the i-th operand is not the same for all func.return ops, then
/// the i-th returned type is an "empty" type.
// NOTE(review): the signature line (`static SmallVector<Type>
// getReturnTypes(SmallVector<func::ReturnOp> returnOps)`, per the forward
// reference below) and the declaration of `result` are missing from this
// extract.
  assert(!returnOps.empty() && "expected at least one ReturnOp");
  int numOperands = returnOps.front()->getNumOperands();

  // Helper function that unpacks memref.cast ops and returns the type.
  auto getSourceType = [&](Value v) { return unpackCast(v).getType(); };

  for (int i = 0; i < numOperands; ++i) {
    // Get the type of the i-th operand of the first func.return ops.
    Type t = getSourceType(returnOps.front()->getOperand(i));

    // Check if all other func.return ops have a matching operand type.
    // A null Type marks "no common type among all return ops".
    for (int j = 1; j < static_cast<int>(returnOps.size()); ++j)
      if (getSourceType(returnOps[j]->getOperand(i)) != t)
        t = Type();

    result.push_back(t);
  }

  return result;
}
414
415/// Fold return values that are memref casts and update function return types.
416///
417/// During FuncOp bufferization, the exact type of the returned memrefs (if any)
418/// is not known yet. Therefore, the bufferization uses memref types with the
419/// most generic layout map as function return types. After bufferizing the
420/// entire function body, a more concise memref type can potentially be used for
421/// the return type of the function.
422static void foldMemRefCasts(func::FuncOp funcOp) {
423 // There is nothing to do for bodiless ops.
424 if (funcOp.getBody().empty())
425 return;
426
427 // Compute the common result types of all return ops.
428 SmallVector<func::ReturnOp> returnOps = getReturnOps(funcOp);
429 SmallVector<Type> resultTypes = getReturnTypes(returnOps);
430
431 // Remove direct casts.
432 for (func::ReturnOp returnOp : returnOps) {
433 for (OpOperand &operand : returnOp->getOpOperands()) {
434 // Bail if no common result type was found.
435 if (resultTypes[operand.getOperandNumber()]) {
436 operand.set(unpackCast(operand.get()));
437 }
438 }
439 }
440
441 // Fill in the missing result types that were not the same among all
442 // func.return ops.
443 for (int i = 0; i < static_cast<int>(resultTypes.size()); ++i) {
444 if (resultTypes[i])
445 continue;
446 resultTypes[i] = funcOp.getFunctionType().getResult(i);
447 }
448
449 // Update the function type.
450 auto newFuncType = FunctionType::get(
451 funcOp.getContext(), funcOp.getFunctionType().getInputs(), resultTypes);
452 funcOp.setType(newFuncType);
453}
454
// Analyze the given symbol-table op: non-recursive functions are analyzed in
// callee-before-caller order; mutually recursive functions are analyzed last
// without function boundary analyses.
// NOTE(review): parts of the signature (`mlir::bufferization::analyzeModuleOp(
// Operation *moduleOp, OneShotAnalysisState &state, ...)`, per the forward
// reference below), the initialization of `funcState`, and the statement that
// marks a FuncOp as fully analyzed are missing from this extract.
LogicalResult
                BufferizationStatistics *statistics) {
  // This entry point only makes sense with function boundary bufferization.
  assert(state.getOptions().bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");

  // A list of non-circular functions in the order in which they are analyzed
  // and bufferized.
  SmallVector<func::FuncOp> orderedFuncOps;
  // A list of all other functions. I.e., functions that call each other
  // recursively. For these, we analyze the function body but not the function
  // boundary.
  SmallVector<func::FuncOp> remainingFuncOps;

  // A mapping of FuncOps to their callers.
  FuncCallerMap callerMap;

  if (failed(getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps,
                                      remainingFuncOps, callerMap,
                                      funcState.symbolTables)))
    return failure();

  // Analyze functions in order. Starting with functions that are not calling
  // any other functions.
  for (func::FuncOp funcOp : orderedFuncOps) {
    if (!state.getOptions().isOpAllowed(funcOp))
      continue;

    // Now analyzing function.
    funcState.startFunctionAnalysis(funcOp);

    // Analyze funcOp.
    if (failed(analyzeOp(funcOp, state, statistics)))
      return failure();

    // Run some extra function analyses.
    if (failed(aliasingFuncOpBBArgsAnalysis(funcOp, state, funcState)) ||
        failed(funcOpBbArgReadWriteAnalysis(funcOp, state, funcState)))
      return failure();

    // Mark op as fully analyzed.
    // NOTE(review): the statement performing this marking is missing from
    // this extract.
  }

  // Analyze all other functions. All function boundary analyses are skipped.
  for (func::FuncOp funcOp : remainingFuncOps) {
    if (!state.getOptions().isOpAllowed(funcOp))
      continue;

    // Analyze funcOp.
    if (failed(analyzeOp(funcOp, state, statistics)))
      return failure();

    // TODO: We currently skip all function argument analyses for functions
    // that call each other circularly. These analyses do not support recursive
    // calls yet. The `BufferizableOpInterface` implementations of `func`
    // dialect ops return conservative results in the absence of analysis
    // information.
  }

  return success();
}
519
// Remove bufferization attributes from all FuncOp arguments in the given
// symbol-table op. Does not traverse nested symbol tables.
// NOTE(review): the first line of the signature (`void
// mlir::bufferization::removeBufferizationAttributesInModule(`, per the
// forward reference below) and the innermost loop body statement
// (presumably `removeBufferizationAttributes(bbArg);`) are missing from this
// extract.
    Operation *moduleOp) {
  for (mlir::Region &region : moduleOp->getRegions()) {
    for (mlir::Block &block : region.getBlocks()) {
      for (func::FuncOp funcOp : block.getOps<func::FuncOp>()) {
        for (BlockArgument bbArg : funcOp.getArguments())
      }
    }
  }
}
531
// Bufferize all functions of the symbol-table op, callees before callers,
// then bufferize the remaining non-function ops.
// NOTE(review): the first lines of the signature (`LogicalResult
// mlir::bufferization::bufferizeModuleOp(Operation *moduleOp, const
// OneShotBufferizationOptions &options,`, per the forward reference below)
// and the final cleanup call are missing from this extract.
    BufferizationState &state, BufferizationStatistics *statistics) {
  assert(options.bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");
  IRRewriter rewriter(moduleOp->getContext());

  // A list of non-circular functions in the order in which they are analyzed
  // and bufferized.
  SmallVector<func::FuncOp> orderedFuncOps;
  // A list of all other functions. I.e., functions that call each other
  // recursively. For these, we analyze the function body but not the function
  // boundary.
  SmallVector<func::FuncOp> remainingFuncOps;

  // A mapping of FuncOps to their callers.
  FuncCallerMap callerMap;

  // Try to bufferize functions in calling order. I.e., first bufferize
  // functions that do not call other functions. This allows us to infer
  // accurate buffer types for function return values. Functions that call
  // each other recursively are bufferized in an unspecified order at the end.
  // We may use unnecessarily "complex" (in terms of layout map) buffer types.
  if (failed(getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps,
                                      remainingFuncOps, callerMap,
                                      state.getSymbolTables())))
    return failure();
  // Recursive functions are appended and thus bufferized last.
  llvm::append_range(orderedFuncOps, remainingFuncOps);

  // Bufferize functions.
  for (func::FuncOp funcOp : orderedFuncOps) {
    // Note: It would be good to apply cleanups here but we cannot as aliasInfo
    // would be invalidated.

    if (llvm::is_contained(options.noAnalysisFuncFilter, funcOp.getSymName())) {
      // This function was not analyzed and RaW conflicts were not resolved.
      // Buffer copies must be inserted before every write.
      OneShotBufferizationOptions updatedOptions = options;
      updatedOptions.copyBeforeWrite = true;
      if (failed(bufferizeOp(funcOp, updatedOptions, state, statistics)))
        return failure();
    } else {
      if (failed(bufferizeOp(funcOp, options, state, statistics)))
        return failure();
    }

    // Change buffer return types to more precise layout maps.
    if (options.inferFunctionResultLayout)
      foldMemRefCasts(funcOp);
  }

  // Bufferize all other ops.
  for (mlir::Region &region : moduleOp->getRegions()) {
    for (mlir::Block &block : region.getBlocks()) {
      for (mlir::Operation &op :
           llvm::make_early_inc_range(block.getOperations())) {
        // Functions were already bufferized.
        if (isa<func::FuncOp>(&op) || op.hasTrait<OpTrait::SymbolTable>())
          continue;
        if (failed(bufferizeOp(&op, options, state, statistics)))
          return failure();
      }
    }
  }

  // Post-pass cleanup of function argument attributes.
  // NOTE(review): the call performing this cleanup (presumably
  // `removeBufferizationAttributesInModule(moduleOp);`) is missing from this
  // extract.

  return success();
}
602
// Entry point: optionally insert tensor copies (analysis), then bufferize the
// whole symbol-table op.
// NOTE(review): the first lines of the signature (`LogicalResult
// mlir::bufferization::runOneShotModuleBufferize(Operation *moduleOp, const
// bufferization::OneShotBufferizationOptions &options,`, per the forward
// reference below) are missing from this extract.
    BufferizationState &state, BufferizationStatistics *statistics) {
  assert(options.bufferizeFunctionBoundaries &&
         "expected that function boundary bufferization is activated");
  // `copyBeforeWrite` skips analysis entirely; `testAnalysisOnly` requires it.
  assert(!(options.copyBeforeWrite && options.testAnalysisOnly) &&
         "invalid combination of bufferization flags");
  if (!options.copyBeforeWrite) {
    if (options.noAnalysisFuncFilter.empty()) {
      if (failed(insertTensorCopies(moduleOp, options, state, statistics)))
        return failure();
    } else {
      // FuncOps whose names are specified in options.noAnalysisFuncFilter will
      // not be analyzed. Ops in these FuncOps will not be analyzed as well.
      OpFilter::Entry::FilterFn analysisFilterFn = [=](Operation *op) {
        auto func = dyn_cast<func::FuncOp>(op);
        if (!func)
          func = op->getParentOfType<func::FuncOp>();
        if (func)
          return llvm::is_contained(options.noAnalysisFuncFilter,
                                    func.getSymName());
        return false;
      };
      OneShotBufferizationOptions updatedOptions(options);
      updatedOptions.opFilter.denyOperation(analysisFilterFn);
      if (failed(
              insertTensorCopies(moduleOp, updatedOptions, state, statistics)))
        return failure();
    }
  }
  // In analysis-only mode, do not rewrite any IR.
  if (options.testAnalysisOnly)
    return success();
  if (failed(bufferizeModuleOp(moduleOp, options, state, statistics)))
    return failure();
  return success();
}
return success()
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
DenseMap< func::FuncOp, DenseSet< Operation * > > FuncCallerMap
A mapping of FuncOps to their callers.
static SmallVector< Type > getReturnTypes(SmallVector< func::ReturnOp > returnOps)
Helper function that returns the return types (skipping casts) of the given func.return ops.
static FuncAnalysisState & getOrCreateFuncAnalysisState(OneShotAnalysisState &state)
Get or create FuncAnalysisState.
static bool hasTensorSignature(func::FuncOp funcOp)
Return "true" if the given function signature has tensor semantics.
static void removeBufferizationAttributes(BlockArgument bbArg)
Remove bufferization attributes on FuncOp arguments.
static void foldMemRefCasts(func::FuncOp funcOp)
Fold return values that are memref casts and update function return types.
static Value unpackCast(Value v)
Helper function that extracts the source from a memref.cast.
static LogicalResult getFuncOpsOrderedByCalls(Operation *moduleOp, SmallVectorImpl< func::FuncOp > &orderedFuncOps, SmallVectorImpl< func::FuncOp > &remainingFuncOps, FuncCallerMap &callerMap, SymbolTableCollection &symbolTables)
Store all functions of the moduleOp in orderedFuncOps, sorted by callee-caller order (i....
static llvm::ManagedStatic< PassManagerOptions > options
Attributes are known-constant values of operations.
Definition Attributes.h:25
This class represents an argument of a Block.
Definition Value.h:306
unsigned getArgNumber() const
Returns the number of this argument.
Definition Value.h:318
Block * getOwner() const
Returns the block that owns this argument.
Definition Value.h:315
Block represents an ordered list of Operations.
Definition Block.h:33
Operation * getParentOp()
Returns the closest surrounding operation that contains this block.
Definition Block.cpp:31
IRValueT get() const
Return the current value being used by this operand.
This class coordinates rewriting a piece of IR outside of a pattern rewrite, providing a way to keep ...
This class helps build Operations.
Definition Builders.h:209
This class represents an operand of an operation.
Definition Value.h:254
unsigned getOperandNumber()
Return which operand this is in the OpOperand list of the Operation.
Definition Value.cpp:226
A trait used to provide symbol table functionalities to a region operation.
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
Definition Operation.h:778
Attribute getAttr(StringAttr name)
Return the specified attribute if present, null otherwise.
Definition Operation.h:563
bool hasAttr(StringAttr name)
Return true if the operation has an attribute with the provided name, false otherwise.
Definition Operation.h:589
unsigned getNumOperands()
Definition Operation.h:375
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
Definition Operation.h:259
void setAttr(StringAttr name, Attribute value)
If the an attribute exists with the specified name, change it to the new value.
Definition Operation.h:611
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
Definition Operation.h:706
MLIRContext * getContext()
Return the context this operation is associated with.
Definition Operation.h:237
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition Region.h:26
This class represents a collection of SymbolTables.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition Types.h:74
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition Value.h:96
Type getType() const
Return the type of this value.
Definition Value.h:105
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition Value.cpp:18
A utility result that is used to signal how to proceed with an ongoing walk:
Definition WalkResult.h:29
static WalkResult skip()
Definition WalkResult.h:48
static WalkResult advance()
Definition WalkResult.h:47
bool wasInterrupted() const
Returns true if the walk was interrupted.
Definition WalkResult.h:51
State for analysis-enabled bufferization.
bool isValueWritten(Value value) const
Return true if the buffer of the given tensor value is written to.
Ty & addExtension(Args &&...args)
Adds a new Extension of the type specified as template parameter, constructing it with the arguments ...
const OneShotBufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
Ty * getExtension()
Returns the extension of the specified type.
bool areEquivalentBufferizedValues(Value v1, Value v2) const override
Return true if v1 and v2 bufferize to equivalent buffers.
bool areAliasingBufferizedValues(Value v1, Value v2) const override
Return true if v1 and v2 may bufferize to aliasing buffers.
Operation * getOwner() const
Return the owner of this operand.
Definition UseDefLists.h:38
static FuncOp getCalledFunction(CallOpInterface callOp, SymbolTableCollection &symbolTables)
Return the FuncOp called by callOp.
LogicalResult bufferizeOp(Operation *op, const BufferizationOptions &options, BufferizationState &bufferizationState, BufferizationStatistics *statistics=nullptr)
Bufferize op and its nested ops that implement BufferizableOpInterface.
llvm::LogicalResult bufferizeModuleOp(Operation *moduleOp, const OneShotBufferizationOptions &options, BufferizationState &state, BufferizationStatistics *statistics=nullptr)
Bufferize an ops nested ops that implement BufferizableOpInterface.
void removeBufferizationAttributesInModule(Operation *moduleOp)
Remove bufferization attributes on every FuncOp arguments in the SymbolTable op.
LogicalResult analyzeOp(Operation *op, OneShotAnalysisState &state, BufferizationStatistics *statistics=nullptr)
Analyze op and its nested ops.
SmallVector< func::ReturnOp > getReturnOps(func::FuncOp funcOp)
Helper function that returns all func.return ops in the given function.
LogicalResult insertTensorCopies(Operation *op, const OneShotBufferizationOptions &options, const BufferizationState &bufferizationState, BufferizationStatistics *statistics=nullptr)
Resolve RaW and other conflicts by inserting bufferization.alloc_tensor ops.
llvm::LogicalResult analyzeModuleOp(Operation *moduleOp, OneShotAnalysisState &state, BufferizationStatistics *statistics=nullptr)
Analyze moduleOp and its nested ops.
llvm::LogicalResult runOneShotModuleBufferize(Operation *moduleOp, const bufferization::OneShotBufferizationOptions &options, BufferizationState &state, BufferizationStatistics *statistics=nullptr)
Run One-Shot Module Bufferization on the given SymbolTable.
Include the generated interface declarations.
llvm::SetVector< T, Vector, Set, N > SetVector
Definition LLVM.h:125
llvm::DenseMap< KeyT, ValueT, KeyInfoT, BucketT > DenseMap
Definition LLVM.h:120
Bufferization statistics for debugging.
Definition Bufferize.h:35
Options for analysis-enabled bufferization.
Extra analysis state that is required for bufferization of function boundaries.
DenseMap< FuncOp, IndexMapping > equivalentFuncArgs
A mapping of ReturnOp OpOperand indices to equivalent FuncOp BBArg indices.
DenseMap< FuncOp, IndexToIndexListMapping > aliasingReturnVals
A mapping of FuncOp BBArg indices to aliasing ReturnOp OpOperand indices.
SymbolTableCollection symbolTables
A collection of cached SymbolTables used for faster function lookup.
DenseMap< FuncOp, BbArgIndexSet > readBbArgs
A set of all read BlockArguments of FuncOps.
DenseMap< FuncOp, BbArgIndexSet > writtenBbArgs
A set of all written-to BlockArguments of FuncOps.
DenseMap< FuncOp, FuncOpAnalysisState > analyzedFuncOps
Keep track of which FuncOps are fully analyzed or currently being analyzed.
void startFunctionAnalysis(FuncOp funcOp)
This function is called right before analyzing the given FuncOp.
Eliminates variable at the specified position using Fourier-Motzkin variable elimination.