//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "llvm/ADT/ScopeExit.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"

using namespace mlir;
using namespace bufferization;

static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all regions visited along the way; they are all mapped to the
  // repetitive region once it has been found.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all regions visited along the way; they are all mapped to the
  // repetitive region once it has been found. (Without this push_back, the
  // caching loop below would never run.)
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

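/// Return `true` if `op0` and `op1` are located in mutually exclusive regions
/// (e.g., the "then" and "else" regions of the same scf.if). The result is
/// cached for both operand orderings.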
bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = ::mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate results for both orderings of the ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

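/// Drop all cached analysis results. Cached pointers into the IR may
/// otherwise dangle after the IR has been modified.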
void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}

SymbolTableCollection &BufferizationState::getSymbolTables() {
  return symbolTables;
}

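/// Assuming that the given region is repetitive, return the next enclosing
/// repetitive region, or nullptr if there is none.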
Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

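/// Return the closest enclosing parallel region of the given region (possibly
/// the region itself), or nullptr if there is none.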
Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}

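/// Return the owner op of the given value: the defining op in case of an
/// OpResult, and the parent op of the owning block in case of a
/// BlockArgument.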
Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
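///
/// Example of the created op (illustrative; `%t` is the shaped value and
/// `%dim` a dynamic extent):
///   %0 = bufferization.alloc_tensor(%dim) : tensor<?xf32>        // !copy
///   %1 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>   // copy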
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape())) {
          if (ShapedType::isDynamic(dim.value())) {
            dynamicSizes.push_back(
                getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
          }
        }
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if the 'copy' operand is
  // specified.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}

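/// Resolve all out-of-place tensor OpOperands of this op by materializing
/// bufferization.alloc_tensor ops (with or without a copy, depending on the
/// analysis). For example, if the analysis determined that the destination
/// operand of a hypothetical tensor.insert must be copied:
///
///   %r = tensor.insert %f into %t[%i] : tensor<?xf32>
///
/// is rewritten to
///
///   %c = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
///   %r = tensor.insert %f into %c[%i] : tensor<?xf32>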
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can sometimes
      // be smaller than the OpOperand (e.g., in the case of an extract_slice,
      // where the result is usually a smaller part of the source). Do not apply
      // this optimization if the OpResult is an unranked tensor (because those
      // cannot be copied at the moment).
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.contains(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(
        llvm::map_range(value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

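/// Return whether the op is allowed by the filter: DENY entries take
/// precedence over ALLOW entries, and if no ALLOW entry exists, all
/// non-denied ops are allowed.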
bool OpFilter::isOpAllowed(Operation *op) const {
  // Allow ops by default if there is no ALLOW rule; otherwise an ALLOW rule
  // must match.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // DENY filter matches. This op is not allowed. (Even if other ALLOW
        // filters may match.)
        return false;
    }
  }
  return isAllowed;
}

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BufferLikeType
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
  }

  // If not a builtin tensor type, fall back to
  // TensorLikeType::getBufferType().
  auto bufferType =
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
  return *bufferType;
}

/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

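/// Set the function boundary type conversion: this controls which layout map
/// memref function arguments are assigned (static identity or fully dynamic)
/// and whether function result layouts are inferred.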
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                 memorySpace));
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                             memorySpace));
    }

    // If not a builtin tensor type, fall back to
    // TensorLikeType::getBufferType().
    auto bufferType =
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
    return *bufferType;
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}

/// Determine which Values will alias with `opOperand` if the op is bufferized
/// in place. Return all tensor Values if the op is not bufferizable.
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return false.
  return false;
}

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}

// Starting from `opOperand`, follow the use-def chain in reverse, always
// selecting the aliasing OpOperands. Find and return Values for which
// `condition` evaluates to true. Uses of such matching Values are not
// traversed any further; the visited aliasing OpOperands are recorded in
// `visitedOpOperands`.
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());

  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is not
        // equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}

// Find the values that define the contents of the given operand's value.
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is
  // "false".
  return false;
}

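/// Return the buffer (memref) for the given tensor value. If the value is the
/// result of a to_tensor op, reuse that op's memref operand; otherwise,
/// insert a to_buffer op. Illustrative example of the inserted op:
///   %b = bufferization.to_buffer %t : tensor<?xf32> to memref<?xf32>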
FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert a to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();

  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state, invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}

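/// Return `true` if the given op has tensor semantics, i.e., tensor-typed
/// operands, results, or region block arguments.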
bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

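/// Replace an op with the given bufferized values. For each tensor result, a
/// to_tensor op is inserted so that remaining uses, which still expect a
/// tensor, keep verifying until they are bufferized as well.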
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = bufferization::ToTensorOp::create(
          rewriter, replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific IRMapping support with debugging.
//===----------------------------------------------------------------------===//

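/// Return a MemRef type for the given tensor type with the specified layout
/// and memory space. If no layout is specified for a ranked tensor, the
/// options' unknownTypeConverterFn decides the layout.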
BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}

BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//
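
/// Default implementation of resultBufferizesToMemoryWrite: An OpResult
/// bufferizes to a memory write if it has no aliasing OpOperand, if an
/// aliasing OpOperand bufferizes to a memory write, or if a write is found
/// along the reverse use-def chain within the op (see Case 3 below).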
874
875bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
876 OpResult opResult, const AnalysisState &state) {
877 auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
878 AliasingOpOperandList opOperands =
879 bufferizableOp.getAliasingOpOperands(opResult, state);
880
881 // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
882 // memory writes.
883 if (opOperands.getAliases().empty())
884 return true;
885
886 // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
887 // may bufferize to a memory write.
888 if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
889 return state.bufferizesToMemoryWrite(*alias.opOperand);
890 }))
891 return true;
892
893 // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
894 // write. (Or: The reverse SSA use-def chain ends inside the reigon.) In that
895 // case, the OpResult bufferizes to a memory write. E.g.:
896 //
897 // %0 = "some_writing_op" : tensor<?xf32>
898 // %r = scf.if ... -> tensor<?xf32> {
899 // scf.yield %0 : tensor<?xf32>
900 // } else {
901 // %1 = "another_writing_op"(%0) : tensor<?xf32>
902 // scf.yield %1 : tensor<?xf32>
903 // }
904 // "some_reading_op"(%r)
905 //
906 // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
907 // bufferizes to a memory write and the defining op is inside the scf.if.
908 //
909 // Note: This treatment of surrouding ops is useful for ops that have a
910 // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
911 // the analysis considerably.
912 //
913 // "another_writing_op" in the above example should be able to bufferize
914 // inplace in the absence of another read of %0. However, if the scf.if op
915 // would not be considered a "write", the analysis would detect the
916 // following conflict:
917 //
918 // * read = some_reading_op
919 // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
920 // * conflictingWrite = %1
921 //
922 auto isMemoryWriteInsideOp = [&](Value v) {
923 Operation *op = getOwnerOfValue(v);
924 if (!opResult.getDefiningOp()->isAncestor(op))
925 return false;
926 return state.bufferizesToMemoryWrite(v);
927 };
928 TraversalConfig config;
929 config.alwaysIncludeLeaves = false;
930 for (AliasingOpOperand alias : opOperands) {
931 if (!state
932 .findValueInReverseUseDefChain(alias.opOperand,
933 isMemoryWriteInsideOp, config)
934 .empty())
935 return true;
936 }
937 return false;
938}
939
// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

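/// Default implementation of getBufferType: For block arguments, construct a
/// memref type directly. For OpResults, reuse the buffer type of an
/// equivalent OpOperand if there is one; otherwise construct a memref type
/// with the default memory space.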
FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand. (Use getOwnerOfValue instead of getDefiningOp so
  // that entry block arguments, which have no defining op, are handled too.)
  AliasingOpOperandList r;
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

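/// Default implementation of hasTensorSemantics: Check whether any operand,
/// result, or block argument of the op has a tensor-like type.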
bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}

FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}

bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}