BufferizableOpInterface.cpp
1//===- BufferizableOpInterface.cpp - Bufferizable Ops ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
10
11#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
12#include "mlir/Dialect/Func/IR/FuncOps.h"
13#include "mlir/Dialect/MemRef/IR/MemRef.h"
14#include "mlir/Dialect/Tensor/IR/Tensor.h"
15#include "mlir/IR/AsmState.h"
16#include "mlir/IR/Operation.h"
18#include "mlir/IR/Value.h"
20#include "llvm/ADT/ScopeExit.h"
21
22//===----------------------------------------------------------------------===//
23// BufferizableOpInterface
24//===----------------------------------------------------------------------===//
25
26namespace mlir {
27namespace bufferization {
28
29#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
30
31} // namespace bufferization
32} // namespace mlir
33
34MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)
35
36#define DEBUG_TYPE "bufferizable-op-interface"
37
38using namespace mlir;
39using namespace bufferization;
40
41static bool isRepetitiveRegion(Region *region,
42 const BufferizationOptions &options) {
43 Operation *op = region->getParentOp();
44 if (auto bufferizableOp = options.dynCastBufferizableOp(op))
45 if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
46 return true;
47 return false;
48}
49
50Region *AnalysisState::getEnclosingRepetitiveRegion(
51 Operation *op, const BufferizationOptions &options) {
52 if (!op->getBlock())
53 return nullptr;
54 if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
55 iter != enclosingRepetitiveRegionCache.end())
56 return iter->second;
57 return enclosingRepetitiveRegionCache[op] =
58 getEnclosingRepetitiveRegion(op->getBlock(), options);
59}
60
61Region *AnalysisState::getEnclosingRepetitiveRegion(
62 Value value, const BufferizationOptions &options) {
63 if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
64 iter != enclosingRepetitiveRegionCache.end())
65 return iter->second;
66
67 Region *region = value.getParentRegion();
68 // Collect all regions visited along the way, since the repetitive region
69 // they map to is only known once the walk ends.
70 SmallVector<Region *> visitedRegions;
71 while (region) {
72 visitedRegions.push_back(region);
73 if (isRepetitiveRegion(region, options))
74 break;
75 region = region->getParentRegion();
76 }
77 enclosingRepetitiveRegionCache[value] = region;
78 for (Region *r : visitedRegions)
79 enclosingRepetitiveRegionCache[r] = region;
80 return region;
81}
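// Illustrative sketch ("some_op" is a placeholder): a value defined in the
// body of an scf.for is enclosed in a repetitive region, because the loop
// body may execute more than once:
//
//   scf.for %iv = %lb to %ub step %step {
//     %v = "some_op"() : () -> tensor<?xf32>  // enclosed in the scf.for
//   }                                         // body region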
82
83Region *AnalysisState::getEnclosingRepetitiveRegion(
84 Block *block, const BufferizationOptions &options) {
85 if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
86 iter != enclosingRepetitiveRegionCache.end())
87 return iter->second;
88
89 Region *region = block->getParent();
90 Operation *op = nullptr;
91 // Collect all regions visited along the way, since the repetitive region
92 // they map to is only known once the walk ends.
93 SmallVector<Region *> visitedRegions;
94 do {
visitedRegions.push_back(region);
95 op = region->getParentOp();
96 if (isRepetitiveRegion(region, options))
97 break;
98 } while ((region = op->getParentRegion()));
99
100 enclosingRepetitiveRegionCache[block] = region;
101 for (Region *r : visitedRegions)
102 enclosingRepetitiveRegionCache[r] = region;
103 return region;
104}
105
106bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
107 Operation *op1) {
108 auto key = std::make_pair(op0, op1);
109 if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
110 iter != insideMutuallyExclusiveRegionsCache.end())
111 return iter->second;
112 bool result = ::mlir::insideMutuallyExclusiveRegions(op0, op1);
113 // Populate results for both orderings of the ops.
114 insideMutuallyExclusiveRegionsCache[key] = result;
115 insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
116 return result;
117}
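// For example, ops nested in the "then" and "else" regions of the same scf.if
// can never execute in the same run, so they are in mutually exclusive
// regions (illustrative sketch, "op_a"/"op_b" are placeholders):
//
//   scf.if %cond {
//     "op_a"() : () -> ()
//   } else {
//     "op_b"() : () -> ()   // mutually exclusive with "op_a"
//   }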
118
119void AnalysisState::resetCache() {
120 enclosingRepetitiveRegionCache.clear();
121 insideMutuallyExclusiveRegionsCache.clear();
122}
123
124SymbolTableCollection &BufferizationState::getSymbolTables() {
125 return symbolTables;
126}
127
128const SymbolTableCollection &BufferizationState::getSymbolTables() const {
129 return symbolTables;
130}
131
132Region *bufferization::getNextEnclosingRepetitiveRegion(
133 Region *region, const BufferizationOptions &options) {
134 assert(isRepetitiveRegion(region, options) && "expected repetitive region");
135 while ((region = region->getParentRegion())) {
136 if (isRepetitiveRegion(region, options))
137 break;
138 }
139 return region;
140}
141
142Region *bufferization::getParallelRegion(Region *region,
143 const BufferizationOptions &options) {
144 while (region) {
145 auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
146 if (bufferizableOp &&
147 bufferizableOp.isParallelRegion(region->getRegionNumber())) {
148 assert(isRepetitiveRegion(region, options) &&
149 "expected that all parallel regions are also repetitive regions");
150 return region;
151 }
152 region = region->getParentRegion();
153 }
154 return nullptr;
155}
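// For example, the body of an scf.forall is a parallel region: its iterations
// may execute concurrently, and every parallel region is also repetitive. The
// body of a plain scf.for is repetitive but not parallel, so this walk keeps
// going past it.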
156
157Operation *bufferization::getOwnerOfValue(Value value) {
158 if (auto opResult = llvm::dyn_cast<OpResult>(value))
159 return opResult.getDefiningOp();
160 return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
161}
162
163/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
164/// shaped value is copied. Otherwise, a tensor with undefined contents is
165/// allocated.
166FailureOr<Value> bufferization::allocateTensorForShapedValue(
167 OpBuilder &b, Location loc, Value shapedValue,
168 const BufferizationOptions &options, const BufferizationState &state,
169 bool copy) {
170 Value tensor;
171 if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
172 tensor = shapedValue;
173 } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
174 tensor = ToTensorOp::create(
175 b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
176 shapedValue);
177 } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
178 llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
179 return getOwnerOfValue(shapedValue)
180 ->emitError("copying of unranked tensors is not implemented");
181 } else {
182 llvm_unreachable("expected RankedTensorType or MemRefType");
183 }
184 RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
185 SmallVector<Value> dynamicSizes;
186 if (!copy) {
187 // Compute the dynamic part of the shape.
188 // First try to query the shape via ReifyRankedShapedTypeOpInterface.
189 bool reifiedShapes = false;
190 if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
191 llvm::isa<OpResult>(shapedValue)) {
192 ReifiedRankedShapedTypeDims resultDims;
193 if (succeeded(
194 reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
195 reifiedShapes = true;
196 auto &shape =
197 resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
198 for (const auto &dim : enumerate(tensorType.getShape())) {
199 if (ShapedType::isDynamic(dim.value())) {
200 dynamicSizes.push_back(
201 getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
202 }
203 }
204 }
205 }
206
207 // If the shape could not be reified, create DimOps.
208 if (!reifiedShapes)
209 populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
210 }
211
212 // Create AllocTensorOp.
213 auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
214 copy ? tensor : Value());
215
216 // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
217 if (copy)
218 return allocTensorOp.getResult();
219 auto copyBufferType =
220 detail::asMemRefType(getBufferType(tensor, options, state));
221 if (failed(copyBufferType))
222 return failure();
223 std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
224 if (!memorySpace)
225 memorySpace = options.defaultMemorySpaceFn(tensorType);
226 if (memorySpace.has_value())
227 allocTensorOp.setMemorySpaceAttr(memorySpace.value());
228 return allocTensorOp.getResult();
229}
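// Illustrative sketch of the emitted IR (types and sizes hypothetical): for
// %t : tensor<?x16xf32>, the two modes produce roughly
//
//   %d = tensor.dim %t, %c0 : tensor<?x16xf32>
//   %a = bufferization.alloc_tensor(%d) : tensor<?x16xf32>          // copy = false
//
//   %a = bufferization.alloc_tensor() copy(%t) : tensor<?x16xf32>   // copy = true
//
// With `copy = false`, the dynamic extents must be computed explicitly; with
// `copy = true`, they are taken from the copied value.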
230
231LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
232 RewriterBase &rewriter, const AnalysisState &analysisState,
233 const BufferizationState &bufferizationState) {
234 OpBuilder::InsertionGuard g(rewriter);
235 Operation *op = getOperation();
236 SmallVector<OpOperand *> outOfPlaceOpOperands;
237 DenseSet<OpOperand *> copiedOpOperands;
238 SmallVector<Value> outOfPlaceValues;
239 DenseSet<Value> copiedOpValues;
240
241 // Find all out-of-place OpOperands.
242 for (OpOperand &opOperand : op->getOpOperands()) {
243 Type operandType = opOperand.get().getType();
244 if (!llvm::isa<TensorType>(operandType))
245 continue;
246 if (analysisState.isInPlace(opOperand))
247 continue;
248 if (llvm::isa<UnrankedTensorType>(operandType))
249 return op->emitError("copying of unranked tensors is not implemented");
250
251 AliasingValueList aliasingValues =
252 analysisState.getAliasingValues(opOperand);
253 if (aliasingValues.getNumAliases() == 1 &&
254 isa<OpResult>(aliasingValues.getAliases()[0].value) &&
255 !analysisState.bufferizesToMemoryWrite(opOperand) &&
256 analysisState
257 .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
258 .getNumAliases() == 1 &&
259 !isa<UnrankedTensorType>(
260 aliasingValues.getAliases()[0].value.getType())) {
261 // The op itself does not write but may create exactly one alias. Instead
262 // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
263 // be smaller than the OpOperand (e.g., in the case of an extract_slice,
264 // where the result is usually a smaller part of the source). Do not apply
265 // this optimization if the OpResult is an unranked tensor (because those
266 // cannot be copied at the moment).
267 Value value = aliasingValues.getAliases()[0].value;
268 outOfPlaceValues.push_back(value);
269 if (!analysisState.canOmitTensorCopy(opOperand))
270 copiedOpValues.insert(value);
271 } else {
272 // In all other cases, make a copy of the OpOperand.
273 outOfPlaceOpOperands.push_back(&opOperand);
274 if (!analysisState.canOmitTensorCopy(opOperand))
275 copiedOpOperands.insert(&opOperand);
276 }
277 }
278
279 // Insert copies of OpOperands.
280 rewriter.setInsertionPoint(op);
281 for (OpOperand *opOperand : outOfPlaceOpOperands) {
282 FailureOr<Value> copy = allocateTensorForShapedValue(
283 rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
284 bufferizationState, copiedOpOperands.contains(opOperand));
285 if (failed(copy))
286 return failure();
287 rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
288 }
289
290 // Insert copies of Values.
291 rewriter.setInsertionPointAfter(op);
292 for (Value value : outOfPlaceValues) {
293 FailureOr<Value> copy = allocateTensorForShapedValue(
294 rewriter, op->getLoc(), value, analysisState.getOptions(),
295 bufferizationState, copiedOpValues.count(value));
296 if (failed(copy))
297 return failure();
298 SmallVector<OpOperand *> uses = llvm::to_vector(
299 llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
300 for (OpOperand *use : uses) {
301 // Do not update the alloc_tensor op that we just created.
302 if (use->getOwner() == copy->getDefiningOp())
303 continue;
304 // tensor.dim ops may have been created to be used as alloc_tensor op
305 // dynamic extents. Do not update these either.
306 if (isa<tensor::DimOp>(use->getOwner()))
307 continue;
308 rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
309 }
310 }
311
312 return success();
313}
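// Illustrative sketch ("writing_op" is a placeholder): if the analysis marked
// the tensor operand of "writing_op" as out-of-place, the operand is
// redirected through a copying allocation so that the write cannot clobber
// other readers of %t:
//
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<8xf32>
//   %r = "writing_op"(%copy) : (tensor<8xf32>) -> tensor<8xf32>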
314
315//===----------------------------------------------------------------------===//
316// OpFilter
317//===----------------------------------------------------------------------===//
318
319bool OpFilter::isOpAllowed(Operation *op) const {
320 // Allow/disallow according to the filter entries.
321 bool isAllowed = !hasAllowRule();
322 for (const Entry &entry : entries) {
323 bool filterResult = entry.fn(op);
324 switch (entry.type) {
325 case Entry::ALLOW:
326 isAllowed |= filterResult;
327 break;
328 case Entry::DENY:
329 if (filterResult)
330 // DENY filter matches. This op is not allowed, even if other ALLOW
331 // filters match.
332 return false;
333 }
334 }
335 return isAllowed;
336}
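// Usage sketch (hypothetical filter setup): with at least one ALLOW rule
// present, an op must match some ALLOW rule and no DENY rule:
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>(); // ALLOW entry
//   filter.denyOperation<tensor::ExtractOp>();    // DENY entry wins on match
//   bool allowed = filter.isOpAllowed(op);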
337
338//===----------------------------------------------------------------------===//
339// BufferizationOptions
340//===----------------------------------------------------------------------===//
341
342namespace {
343
344/// Default function arg type converter: Use a fully dynamic layout map.
345BufferLikeType
346defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
347 func::FuncOp funcOp,
348 const BufferizationOptions &options) {
349 if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
350 return cast<BufferLikeType>(
351 getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
352 }
353
354 // If the type is not a builtin tensor, fall back to TensorLikeType::getBufferType().
355 auto bufferType =
356 type.getBufferType(options, [&]() { return funcOp->emitError(); });
357 assert(succeeded(bufferType) &&
358 "a valid buffer is always expected at function boundary");
359 return *bufferType;
360}
361/// Default unknown type converter: Use a fully dynamic layout map.
362BaseMemRefType
363defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
364 const BufferizationOptions &options) {
365 return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
366}
367
368} // namespace
369
370// Default constructor for BufferizationOptions.
371BufferizationOptions::BufferizationOptions()
372 : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
373 unknownTypeConverterFn(defaultUnknownTypeConverter) {}
374
375bool BufferizationOptions::isOpAllowed(Operation *op) const {
376 // Special case: If function boundary bufferization is deactivated, do not
377 // allow ops that belong to the `func` dialect.
378 bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
379 if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
380 return false;
381
382 return opFilter.isOpAllowed(op);
383}
384
385BufferizableOpInterface
386BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
387 if (!isOpAllowed(op))
388 return nullptr;
389 auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
390 if (!bufferizableOp)
391 return nullptr;
392 return bufferizableOp;
393}
394
395BufferizableOpInterface
396BufferizationOptions::dynCastBufferizableOp(Value value) const {
397 return dynCastBufferizableOp(getOwnerOfValue(value));
398}
399
400void BufferizationOptions::setFunctionBoundaryTypeConversion(
401 LayoutMapOption layoutMapOption) {
402 functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
403 func::FuncOp funcOp,
404 const BufferizationOptions &options) {
405 if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
406 if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
407 return cast<BufferLikeType>(
408 bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
409 memorySpace));
410 return cast<BufferLikeType>(
411 bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
412 memorySpace));
413 }
414
415 // If the type is not a builtin tensor, fall back to TensorLikeType::getBufferType().
416 auto bufferType =
417 type.getBufferType(options, [&]() { return funcOp->emitError(); });
418 assert(succeeded(bufferType) &&
419 "a valid buffer is always expected at function boundary");
420 return *bufferType;
421 };
422 inferFunctionResultLayout =
423 layoutMapOption == LayoutMapOption::InferLayoutMap;
424}
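// For example, a function argument of type tensor<?xf32> is converted to
// (sketch):
//
//   LayoutMapOption::IdentityLayoutMap:     memref<?xf32>
//   LayoutMapOption::FullyDynamicLayoutMap: memref<?xf32, strided<[?], offset: ?>>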
425
426//===----------------------------------------------------------------------===//
427// Helper functions for BufferizableOpInterface
428//===----------------------------------------------------------------------===//
429
430static void setInsertionPointAfter(OpBuilder &b, Value value) {
431 if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
432 b.setInsertionPointToStart(bbArg.getOwner());
433 } else {
434 b.setInsertionPointAfter(value.getDefiningOp());
435 }
436}
437
438/// Determine which OpOperand* will alias with `value` if the op is bufferized
439/// in place. Return all tensor OpOperand* if the op is not bufferizable.
440AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
441 if (Operation *op = getOwnerOfValue(value))
442 if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
443 return bufferizableOp.getAliasingOpOperands(value, *this);
444
445 // The op is not bufferizable.
446 return detail::unknownGetAliasingOpOperands(value);
447}
448
449/// Determine which Values will alias with `opOperand` if the op is bufferized
450/// in place. Return all tensor Values if the op is not bufferizable.
451AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
452 if (auto bufferizableOp =
453 getOptions().dynCastBufferizableOp(opOperand.getOwner()))
454 return bufferizableOp.getAliasingValues(opOperand, *this);
455
456 // The op is not bufferizable.
457 return detail::unknownGetAliasingValues(opOperand);
458}
459
460/// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
461/// op is not bufferizable.
462bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
463 if (auto bufferizableOp =
464 getOptions().dynCastBufferizableOp(opOperand.getOwner()))
465 return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
466
467 // Unknown op that returns a tensor. The inplace analysis does not support it.
468 // Conservatively return true.
469 return true;
470}
471
472/// Return true if `opOperand` bufferizes to a memory write. Return
473/// `true` if the op is not bufferizable.
474bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
475 if (auto bufferizableOp =
476 getOptions().dynCastBufferizableOp(opOperand.getOwner()))
477 return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
478
479 // Unknown op that returns a tensor. The inplace analysis does not support it.
480 // Conservatively return true.
481 return true;
482}
483
484/// Return true if `opOperand` does neither read nor write but bufferizes to an
485/// alias. Return false if the op is not bufferizable.
486bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
487 if (auto bufferizableOp =
488 getOptions().dynCastBufferizableOp(opOperand.getOwner()))
489 return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
490
491 // Unknown op that returns a tensor. The inplace analysis does not support it.
492 // Conservatively return false.
493 return false;
494}
495
496bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
497 auto opResult = llvm::dyn_cast<OpResult>(value);
498 if (!opResult)
499 return true;
500 auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
501 if (!bufferizableOp)
502 return true;
503 return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
504}
505
506/// Return true if the given value is read by an op that bufferizes to a memory
507/// read. Also takes into account ops that create an alias but do not read by
508/// themselves (e.g., ExtractSliceOp).
509bool AnalysisState::isValueRead(Value value) const {
510 assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
511 SmallVector<OpOperand *> workingSet;
512 DenseSet<OpOperand *> visited;
513 for (OpOperand &use : value.getUses())
514 workingSet.push_back(&use);
515
516 while (!workingSet.empty()) {
517 OpOperand *uMaybeReading = workingSet.pop_back_val();
518 if (!visited.insert(uMaybeReading).second)
519 continue;
520
521 // Skip over all ops that neither read nor write (but create an alias).
522 if (bufferizesToAliasOnly(*uMaybeReading))
523 for (AliasingValue alias : getAliasingValues(*uMaybeReading))
524 for (OpOperand &use : alias.value.getUses())
525 workingSet.push_back(&use);
526 if (bufferizesToMemoryRead(*uMaybeReading))
527 return true;
528 }
529
530 return false;
531}
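// Example (sketch; "reading_op" is a placeholder): %t is considered read even
// though the extract_slice itself only creates an alias; the traversal
// follows the alias to the actual reading op:
//
//   %s = tensor.extract_slice %t[0] [4] [1] : tensor<8xf32> to tensor<4xf32>
//   "reading_op"(%s) : (tensor<4xf32>) -> ()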
532
533// Starting from `opOperand`, follow the use-def chain in reverse, always
534// selecting the aliasing OpOperands. Find and return Values for which
535// `condition` evaluates to true. Uses of such matching Values are not
536// traversed any further. The aliasing OpOperands visited along the way are
537// recorded in `visitedOpOperands` (if provided).
538llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
539 OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
540 TraversalConfig config,
541 llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
542 llvm::DenseSet<Value> visited;
543 llvm::SetVector<Value> result, workingSet;
544 workingSet.insert(opOperand->get());
545
546 if (visitedOpOperands)
547 visitedOpOperands->insert(opOperand);
548
549 while (!workingSet.empty()) {
550 Value value = workingSet.pop_back_val();
551
552 if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
553 // Stop traversal if value was already visited.
554 if (config.alwaysIncludeLeaves)
555 result.insert(value);
556 continue;
557 }
558 visited.insert(value);
559
560 if (condition(value)) {
561 result.insert(value);
562 continue;
563 }
564
565 if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
566 // Stop iterating if `followUnknownOps` is unset and the op is either
567 // not bufferizable or excluded in the OpFilter.
568 if (config.alwaysIncludeLeaves)
569 result.insert(value);
570 continue;
571 }
572
573 AliasingOpOperandList aliases = getAliasingOpOperands(value);
574 if (aliases.getNumAliases() == 0) {
575 // The traversal ends naturally if there are no more OpOperands that
576 // could be followed.
577 if (config.alwaysIncludeLeaves)
578 result.insert(value);
579 continue;
580 }
581
582 for (AliasingOpOperand a : aliases) {
583 if (config.followEquivalentOnly &&
584 a.relation != BufferRelation::Equivalent) {
585 // Stop iterating if `followEquivalentOnly` is set but the alias is not
586 // equivalent.
587 if (config.alwaysIncludeLeaves)
588 result.insert(value);
589 continue;
590 }
591
592 if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
593 // Stop iterating if `followInPlaceOnly` is set but the alias is
594 // out-of-place.
595 if (config.alwaysIncludeLeaves)
596 result.insert(value);
597 continue;
598 }
599
600 if (config.followSameTypeOrCastsOnly &&
601 a.opOperand->get().getType() != value.getType() &&
602 !value.getDefiningOp<CastOpInterface>()) {
603 // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
604 // has a different type and the op is not a cast.
605 if (config.alwaysIncludeLeaves)
606 result.insert(value);
607 continue;
608 }
609
610 workingSet.insert(a.opOperand->get());
611 if (visitedOpOperands)
612 visitedOpOperands->insert(a.opOperand);
613 }
614 }
615
616 return result;
617}
618
619// Find the values that define the contents of the given operand's value.
620llvm::SetVector<Value>
621AnalysisState::findDefinitions(OpOperand *opOperand) const {
622 TraversalConfig config;
623 config.alwaysIncludeLeaves = false;
624 return findValueInReverseUseDefChain(
625 opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
626 config);
627}
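// Example (sketch; "writer" is a placeholder op that writes): ops that merely
// create an alias are skipped, so for a use of %1 the returned definition set
// is {%0}:
//
//   %0 = "writer"() : () -> tensor<8xf32>
//   %1 = tensor.extract_slice %0[0] [4] [1] : tensor<8xf32> to tensor<4xf32>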
628
629AnalysisState::AnalysisState(const BufferizationOptions &options)
630 : AnalysisState(options, TypeID::get<AnalysisState>()) {}
631
632AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
633 : options(options), type(type) {
634 for (const BufferizationOptions::AnalysisStateInitFn &fn :
635 options.stateInitializers)
636 fn(*this);
637}
638
639bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
640 // Do not copy if the tensor has undefined contents.
641 if (hasUndefinedContents(&opOperand))
642 return true;
643
644 // Do not copy if the buffer of the tensor is entirely overwritten (with
645 // values that do not depend on the old tensor).
646 if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
647 return true;
648
649 // Do not copy if the tensor is never read.
650 AliasingValueList aliases = getAliasingValues(opOperand);
651 if (!bufferizesToMemoryRead(opOperand) &&
652 llvm::none_of(aliases,
653 [&](AliasingValue a) { return isValueRead(a.value); }))
654 return true;
655
656 // Default: Cannot omit the copy.
657 return false;
658}
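// Example: the destination ("outs") operand of a linalg.fill is overwritten
// in full and never read, so its previous contents need not be preserved and
// the copy can be omitted.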
659
660bool AnalysisState::isInPlace(OpOperand &opOperand) const {
661 // ToBufferOps are always in-place.
662 if (isa<ToBufferOp>(opOperand.getOwner()))
663 return true;
664
665 // In the absence of analysis information, OpOperands that bufferize to a
666 // memory write are out-of-place, i.e., an alloc and copy is inserted.
667 return !bufferizesToMemoryWrite(opOperand);
668}
669
670bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
671 // In the absence of analysis information, we do not know if the values are
672 // equivalent. The conservative answer is "false".
673 return false;
674}
675
676bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
677 // In the absence of analysis information, we do not know if the values may be
678 // aliasing. The conservative answer is "true".
679 return true;
680}
681
682bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
683 // In the absence of analysis information, the conservative answer is "false".
684 return false;
685}
686
687FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
688 const BufferizationOptions &options,
689 const BufferizationState &state) {
690#ifndef NDEBUG
691 auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
692 assert(tensorType && "unexpected non-tensor type");
693#endif // NDEBUG
694
695 // Replace "%t = to_tensor %m" with %m.
696 if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
697 return toTensorOp.getBuffer();
698
699 // Insert to_buffer op.
700 OpBuilder::InsertionGuard g(rewriter);
701 setInsertionPointAfter(rewriter, value);
702 FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
703 if (failed(bufferType))
704 return failure();
705
706 return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
707 *bufferType, value)
708 .getResult();
709}
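// Illustrative sketch (assembly abridged; the exact layout comes from
// getBufferType()): for a tensor %t that was not produced by a to_tensor op,
// this materializes the buffer as
//
//   %b = bufferization.to_buffer %t : tensor<8xf32> to memref<8xf32, strided<[?], offset: ?>>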
710
711/// Return the buffer type for a given Value (tensor) after bufferization.
712FailureOr<BufferLikeType>
713bufferization::getBufferType(Value value, const BufferizationOptions &options,
714 const BufferizationState &state) {
715 SmallVector<Value> invocationStack;
716 return getBufferType(value, options, state, invocationStack);
717}
718
719/// Return the buffer type for a given Value (tensor) after bufferization.
720FailureOr<BufferLikeType>
721bufferization::getBufferType(Value value, const BufferizationOptions &options,
722 const BufferizationState &state,
723 SmallVector<Value> &invocationStack) {
724 assert(llvm::isa<TensorLikeType>(value.getType()) &&
725 "unexpected non-tensor type");
726 invocationStack.push_back(value);
727 llvm::scope_exit popFromStack([&]() { invocationStack.pop_back(); });
728
729 // Try querying BufferizableOpInterface.
730 Operation *op = getOwnerOfValue(value);
731 auto bufferizableOp = options.dynCastBufferizableOp(op);
732 if (bufferizableOp)
733 return bufferizableOp.getBufferType(value, options, state, invocationStack);
734
735 // Op is not bufferizable.
736 return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
737 return op->emitError();
738 });
739}
740
741bool bufferization::hasTensorSemantics(Operation *op) {
742 if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
743 return bufferizableOp.hasTensorSemantics();
744 return detail::defaultHasTensorSemantics(op);
745}
746
747void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
748 Operation *op,
749 ValueRange values) {
750 assert(values.size() == op->getNumResults() &&
751 "expected one value per OpResult");
752 OpBuilder::InsertionGuard g(rewriter);
753
754 // Replace all OpResults with the given values.
755 SmallVector<Value> replacements;
756 for (OpResult opResult : op->getOpResults()) {
757 Value replacement = values[opResult.getResultNumber()];
758 if (llvm::isa<TensorLikeType>(opResult.getType())) {
759 // The OpResult is a tensor. Such values are replaced with memrefs during
760 // bufferization.
761 assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
762 "tensor op result should be replaced with a buffer value");
763 // The existing uses of the OpResult still expect a tensor. Insert a
764 // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
765 // lose all of its users and eventually DCE away.
766 rewriter.setInsertionPointAfter(op);
767 replacement = bufferization::ToTensorOp::create(
768 rewriter, replacement.getLoc(), opResult.getType(), replacement);
769 }
770 replacements.push_back(replacement);
771 }
772
773 rewriter.replaceOp(op, replacements);
774}
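// Illustrative sketch ("tensor_op" is a placeholder): when a tensor result %r
// of "tensor_op" is replaced with a buffer %b : memref<8xf32>, the remaining
// tensor uses of %r are routed through
//
//   %r2 = bufferization.to_tensor %b : memref<8xf32> to tensor<8xf32>
//
// which folds away once all of its users are bufferized.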
775
776//===----------------------------------------------------------------------===//
777// Bufferization-specific scoped alloc insertion support.
778//===----------------------------------------------------------------------===//
779
780/// Create a memref allocation with the given type and dynamic extents.
781FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
782 MemRefType type,
783 ValueRange dynShape) const {
784 if (allocationFn)
785 return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);
786
787 // Default buffer allocation via AllocOp.
788 if (bufferAlignment != 0)
789 return memref::AllocOp::create(b, loc, type, dynShape,
790 b.getI64IntegerAttr(bufferAlignment))
791 .getResult();
792 return memref::AllocOp::create(b, loc, type, dynShape).getResult();
793}
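// Default output sketch (assuming bufferAlignment = 64 and one dynamic
// extent %d):
//
//   %alloc = memref.alloc(%d) {alignment = 64 : i64} : memref<?x16xf32>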
794
795/// Create a memory copy between two memref buffers.
796LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
797 Value from, Value to) const {
798 if (memCpyFn)
799 return (*memCpyFn)(b, loc, from, to);
800
801 memref::CopyOp::create(b, loc, from, to);
802 return success();
803}
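// Default lowering sketch:
//
//   memref.copy %from, %to : memref<?xf32> to memref<?xf32>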
804
805//===----------------------------------------------------------------------===//
806// Bufferization-specific IRMapping support with debugging.
807//===----------------------------------------------------------------------===//
808
809BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
810 const BufferizationOptions &options,
811 MemRefLayoutAttrInterface layout,
812 Attribute memorySpace) {
813 // Case 1: Unranked memref type.
814 if (auto unrankedTensorType =
815 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
816 assert(!layout && "UnrankedTensorType cannot have a layout map");
817 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
818 memorySpace);
819 }
820
821 // Case 2: Ranked memref type with specified layout.
822 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
823 if (layout) {
824 return MemRefType::get(rankedTensorType.getShape(),
825 rankedTensorType.getElementType(), layout,
826 memorySpace);
827 }
828
829 return options.unknownTypeConverterFn(tensorType, memorySpace, options);
830}
831
832BaseMemRefType
833bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
834 Attribute memorySpace) {
835 // Case 1: Unranked memref type.
836 if (auto unrankedTensorType =
837 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
838 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
839 memorySpace);
840 }
841
842 // Case 2: Ranked memref type.
843 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
844 int64_t dynamicOffset = ShapedType::kDynamic;
845 SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
846 ShapedType::kDynamic);
847 auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
848 dynamicOffset, dynamicStrides);
849 return MemRefType::get(rankedTensorType.getShape(),
850 rankedTensorType.getElementType(), stridedLayout,
851 memorySpace);
852}
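// For example, tensor<8x?xf32> maps to
// memref<8x?xf32, strided<[?, ?], offset: ?>>, and the unranked
// tensor<*xf32> maps to memref<*xf32>.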
853
854/// Return a MemRef type with a static identity layout (i.e., no layout map). If
855/// the given tensor type is unranked, return an unranked MemRef type.
856BaseMemRefType
857bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
858 Attribute memorySpace) {
859 // Case 1: Unranked memref type.
860 if (auto unrankedTensorType =
861 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
862 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
863 memorySpace);
864 }
865
866 // Case 2: Ranked memref type.
867 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
868 MemRefLayoutAttrInterface layout = {};
869 return MemRefType::get(rankedTensorType.getShape(),
870 rankedTensorType.getElementType(), layout,
871 memorySpace);
872}
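// For example, tensor<8x?xf32> maps to memref<8x?xf32> (identity layout) and
// tensor<*xf32> maps to memref<*xf32>.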
873
874//===----------------------------------------------------------------------===//
875// Default implementations of interface methods
876//===----------------------------------------------------------------------===//
877
878bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
879 OpResult opResult, const AnalysisState &state) {
880 auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
881 AliasingOpOperandList opOperands =
882 bufferizableOp.getAliasingOpOperands(opResult, state);
883
884 // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
885 // memory writes.
886 if (opOperands.getAliases().empty())
887 return true;
888
889 // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
890 // may bufferize to a memory write.
891 if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
892 return state.bufferizesToMemoryWrite(*alias.opOperand);
893 }))
894 return true;
895
896 // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
897 // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
898 // case, the OpResult bufferizes to a memory write. E.g.:
899 //
900 // %0 = "some_writing_op" : tensor<?xf32>
901 // %r = scf.if ... -> tensor<?xf32> {
902 // scf.yield %0 : tensor<?xf32>
903 // } else {
904 // %1 = "another_writing_op"(%0) : tensor<?xf32>
905 // scf.yield %1 : tensor<?xf32>
906 // }
907 // "some_reading_op"(%r)
908 //
909 // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
910 // bufferizes to a memory write and the defining op is inside the scf.if.
911 //
912 // Note: This treatment of surrounding ops is useful for ops that have a
913 // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
914 // the analysis considerably.
915 //
916 // "another_writing_op" in the above example should be able to bufferize
917 // inplace in the absence of another read of %0. However, if the scf.if op
918 // would not be considered a "write", the analysis would detect the
919 // following conflict:
920 //
921 // * read = some_reading_op
922 // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
923 // * conflictingWrite = %1
924 //
925 auto isMemoryWriteInsideOp = [&](Value v) {
926 Operation *op = getOwnerOfValue(v);
927 if (!opResult.getDefiningOp()->isAncestor(op))
928 return false;
929 return state.bufferizesToMemoryWrite(v);
930 };
931 TraversalConfig config;
932 config.alwaysIncludeLeaves = false;
933 for (AliasingOpOperand alias : opOperands) {
934 if (!state
935 .findValueInReverseUseDefChain(alias.opOperand,
936 isMemoryWriteInsideOp, config)
937 .empty())
938 return true;
939 }
940 return false;
941}
942
943// Compute the AliasingOpOperandList for a given Value based on
944// getAliasingValues.
945AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
946 Value value, const AnalysisState &state) {
947 Operation *op = getOwnerOfValue(value);
948 SmallVector<AliasingOpOperand> result;
949 for (OpOperand &opOperand : op->getOpOperands()) {
950 if (!llvm::isa<TensorType>(opOperand.get().getType()))
951 continue;
952 AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
953 for (const auto &it : aliasingValues)
954 if (it.value == value)
955 result.emplace_back(&opOperand, it.relation, it.isDefinite);
956 }
957 return AliasingOpOperandList(std::move(result));
958}
959
960FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
961 Value value, const BufferizationOptions &options,
962 const BufferizationState &bufferizationState,
963 SmallVector<Value> &invocationStack) {
964 assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
965 auto tensorType = cast<TensorType>(value.getType());
966
967 auto elementType = tensorType.getElementType();
968
969 if (!BaseMemRefType::isValidElementType(elementType))
970 return getOwnerOfValue(value)->emitError()
971 << "cannot bufferize value of type " << tensorType
972 << ": element type " << elementType
973 << " is not a valid memref element type";
974
975 // No further analysis is possible for a block argument.
976 if (llvm::isa<BlockArgument>(value)) {
977 return cast<BufferLikeType>(
978 bufferization::getMemRefType(tensorType, options));
979 }
980
981 // Value is an OpResult.
982 Operation *op = getOwnerOfValue(value);
983 auto opResult = llvm::cast<OpResult>(value);
984 AnalysisState analysisState(options);
985 AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
986 if (aliases.getNumAliases() > 0 &&
987 aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
988 // If the OpResult has an equivalent OpOperand, both OpResult and
989 // OpOperand bufferize to the exact same buffer type.
990 Value equivalentOperand = aliases.getAliases().front().opOperand->get();
991 return getBufferType(equivalentOperand, options, bufferizationState,
992 invocationStack);
993 }
994
995 // If we do not know the memory space and there is no default memory space,
996 // report a failure.
997 auto memSpace =
998 options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
999 if (!memSpace.has_value())
1000 return op->emitError("could not infer memory space");
1001
1002 return cast<BufferLikeType>(
1003 getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
1004}
1005
1006bool bufferization::detail::defaultIsRepetitiveRegion(
1007 BufferizableOpInterface bufferizableOp, unsigned index) {
1008 assert(index < bufferizableOp->getNumRegions() && "invalid region index");
1009 auto regionInterface =
1010 dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
1011 if (!regionInterface)
1012 return false;
1013 return regionInterface.isRepetitiveRegion(index);
1014}
1015
1016AliasingOpOperandList
1017bufferization::detail::unknownGetAliasingOpOperands(Value value) {
1018 // TODO: Take into account successor blocks.
1019 // No aliasing in case of non-entry blocks.
1020 if (auto bbArg = dyn_cast<BlockArgument>(value))
1021 if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
1022 return {};
1023
1024 // Unknown op: Conservatively assume that each OpResult may alias with every
1025 // OpOperand. In addition, each block argument of an entry block may alias
1026 // with every OpOperand.
1027 AliasingOpOperandList r;
1028 for (OpOperand &operand : value.getDefiningOp()->getOpOperands())
1029 if (isa<TensorType>(operand.get().getType()))
1030 r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
1031 return r;
1032}
1033
1034AliasingValueList
1035bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
1036 // TODO: Take into account successor blocks.
1037 // Unknown op: Conservatively assume that each OpResult may alias with every
1038 // OpOperand. In addition, each block argument of an entry block may alias
1039 // with every OpOperand.
1040 AliasingValueList r;
1041 for (OpResult result : opOperand.getOwner()->getOpResults())
1042 if (llvm::isa<TensorType>(result.getType()))
1043 r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
1044 for (Region &region : opOperand.getOwner()->getRegions())
1045 if (!region.getBlocks().empty())
1046 for (BlockArgument bbArg : region.getBlocks().front().getArguments())
1047 if (isa<TensorType>(bbArg.getType()))
1048 r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
1049 return r;
1050}
1051
1052bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
1053 auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
1054 bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
1055 return any_of(r.getBlocks(), [&](Block &b) {
1056 return any_of(b.getArguments(), [&](BlockArgument bbArg) {
1057 return isaTensor(bbArg.getType());
1058 });
1059 });
1060 });
1061 if (hasTensorBlockArgument)
1062 return true;
1063
1064 if (any_of(op->getResultTypes(), isaTensor))
1065 return true;
1066 return any_of(op->getOperandTypes(), isaTensor);
1067}
1068
1069FailureOr<BaseMemRefType>
1070bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
1071 if (failed(bufferType))
1072 return failure();
1073 return cast<BaseMemRefType>(*bufferType);
1074}
1075
1076bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
1077 Value tensor,
1078 Value buffer) {
1079 return mlir::succeeded(
1080 cast<TensorLikeType>(tensor.getType())
1081 .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
1082 [&]() { return op.emitError(); }));
1083}