//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVectorExtras.h"

//===----------------------------------------------------------------------===//
// BufferizableOpInterface
//===----------------------------------------------------------------------===//

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

MLIR_DEFINE_EXPLICIT_TYPE_ID(mlir::bufferization::AnalysisState)

#define DEBUG_TYPE "bufferizable-op-interface"

using namespace mlir;
using namespace bufferization;

static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    if (bufferizableOp.isRepetitiveRegion(region->getRegionNumber()))
      return true;
  return false;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (!op->getBlock())
    return nullptr;
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = value.getParentRegion();
  // Collect all visited regions: we only find out which repetitive region
  // they map to once the loop below terminates.
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions: we only find out which repetitive region
  // they map to once the loop below terminates.
  SmallVector<Region *> visitedRegions;
  do {
    visitedRegions.push_back(region);
    op = region->getParentOp();
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}

bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = ::mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate results for both orderings of the ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}

SymbolTableCollection &BufferizationState::getSymbolTables() {
  return symbolTables;
}

const SymbolTableCollection &BufferizationState::getSymbolTables() const {
  return symbolTables;
}

Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion())) {
    if (isRepetitiveRegion(region, options))
      break;
  }
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}

Operation *bufferization::getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}

/// Create an AllocTensorOp for the given shaped value. If `copy` is set, the
/// shaped value is copied. Otherwise, a tensor with undefined contents is
/// allocated.
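///
/// Illustrative example (assumed IR): for a `tensor<?xf32>` value %t with
/// `copy` unset, this emits something like
///   %d0 = tensor.dim %t, %c0 : tensor<?xf32>
///   %0 = bufferization.alloc_tensor(%d0) : tensor<?xf32>
/// whereas with `copy` set, the shaped value becomes the `copy` operand:
///   %0 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>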
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape.
    // First try to query the shape via ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape())) {
          if (ShapedType::isDynamic(dim.value())) {
            dynamicSizes.push_back(
                getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
          }
        }
      }
    }

    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if the 'copy' operand is
  // specified.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}

LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the OpResult. The OpResult can
      // sometimes be smaller than the OpOperand (e.g., in the case of an
      // extract_slice, where the result is usually a smaller part of the
      // source). Do not apply this optimization if the OpResult is an
      // unranked tensor (because those cannot be copied at the moment).
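      //
      // Illustrative example (assumed IR): in
      //   %0 = tensor.extract_slice %t[0][4][1]
      //       : tensor<8xf32> to tensor<4xf32>
      // copying the result %0 (4 elements) is cheaper than copying the
      // operand %t (8 elements).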
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of Values.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.contains(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::map_to_vector(
        value.getUses(), [](OpOperand &use) { return &use; });
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that we just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created to be used as alloc_tensor op
      // dynamic extents. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}

//===----------------------------------------------------------------------===//
// OpFilter
//===----------------------------------------------------------------------===//

bool OpFilter::isOpAllowed(Operation *op) const {
  // Allow/disallow the op according to the filter entries. If there is no
  // ALLOW rule, all ops are allowed by default (unless denied below).
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // A DENY filter matches: this op is not allowed, even if other ALLOW
        // filters match as well.
        return false;
    }
  }
  return isAllowed;
}
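
// Usage sketch (hypothetical setup, not part of this file): a DENY entry
// always wins over ALLOW entries, so the following allows all ops of the
// tensor dialect except tensor.insert:
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>();
//   filter.denyOperation<tensor::InsertOp>();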

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

namespace {

/// Default function arg type converter: Use a fully dynamic layout map.
BufferLikeType
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
  }

  // If not builtin, fall back to TensorLikeType::getBufferType().
  auto bufferType =
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
  return *bufferType;
}

/// Default unknown type converter: Use a fully dynamic layout map.
BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}

} // namespace

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}

void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                 memorySpace));
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                             memorySpace));
    }

    // If not builtin, fall back to TensorLikeType::getBufferType().
    auto bufferType =
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
    return *bufferType;
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
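
// For illustration: with this conversion, a function argument of type
// `tensor<4x8xf32>` bufferizes to
//   memref<4x8xf32>                                with IdentityLayoutMap, or
//   memref<4x8xf32, strided<[?, ?], offset: ?>>    with FullyDynamicLayoutMap.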

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `value` if the op is bufferized
/// in place. Return all tensor OpOperand* if the op is not bufferizable.
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (Operation *op = getOwnerOfValue(value))
    if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
      return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}

/// Determine which Values will alias with `opOperand` if the op is bufferized
/// in place. Return all tensor Values if the op is not bufferizable.
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

/// Return true if `opOperand` bufferizes to a memory read. Also return true
/// if the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Also return true
/// if the op is not bufferizable.
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` neither reads nor writes but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return false.
  return false;
}

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
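
// Illustrative example (assumed IR): %v below counts as "read" even though
// its direct user bufferizes to an alias only; the read happens through the
// alias:
//
//   %s = tensor.extract_slice %v[0][4][1] : tensor<8xf32> to tensor<4xf32>
//   %e = tensor.extract %s[%c0] : tensor<4xf32>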

// Starting from `opOperand`, follow the use-def chain in reverse, always
// selecting the aliasing OpOperands. Find and return Values for which
// `condition` evaluates to true. Uses of such matching Values are not
// traversed any further. The aliasing OpOperands visited along the way are
// recorded in `visitedOpOperands`, if provided.
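//
// Illustrative example (assumed IR), with `condition` = "bufferizes to a
// memory write" and the traversal starting at the use of %1 in "reader":
//
//   %0 = "writing_op"() : () -> tensor<?xf32>
//   %1 = tensor.insert %f into %0[%c0] : tensor<?xf32>
//   "reader"(%1) : (tensor<?xf32>) -> ()
//
// %1 itself satisfies the condition (tensor.insert bufferizes to a memory
// write), so the traversal stops immediately and returns {%1}.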
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());

  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if `followEquivalentOnly` is set but the alias is
        // not equivalent.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if `followInPlaceOnly` is set but the alias is
        // out-of-place.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if `followSameTypeOrCastsOnly` is set but the alias
        // has a different type and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}

// Find the values that define the contents of the given operand's value.
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}

AnalysisState::AnalysisState(const BufferizationOptions &options)
    : AnalysisState(options, TypeID::get<AnalysisState>()) {}

AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: Cannot omit the copy.
  return false;
}
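
// Illustrative example (assumed IR): the destination operand %t of
//
//   %0 = linalg.fill ins(%f : f32) outs(%t : tensor<?xf32>) -> tensor<?xf32>
//
// is entirely overwritten and never read by the op, so the copy for %t can
// be omitted.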

bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and a copy are inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is
  // "false".
  return false;
}

FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert a to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();

  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}
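
// Illustrative behavior (assumed IR): if %t is defined by
//   %t = bufferization.to_tensor %m : ...
// getBuffer(%t) simply returns %m; for any other tensor value, it inserts
//   %b = bufferization.to_buffer %t : ...
// right after the definition of %t and returns %b.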

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}

/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStack =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state,
                                        invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}

bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = bufferization::ToTensorOp::create(
          rewriter, replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}
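
// Illustrative example (assumed IR): replacing `%0 = "tensor_op"() : ()
// -> tensor<4xf32>` with a buffer value %m of type memref<4xf32> yields
//   %0 = bufferization.to_tensor %m : ...
// so that the remaining tensor uses of %0 keep verifying until they are
// bufferized themselves, at which point the to_tensor op typically DCEs away.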

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc insertion support.
//===----------------------------------------------------------------------===//

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}

/// Create a memory copy between two memref buffers.
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific IRMapping support with debugging.
//===----------------------------------------------------------------------===//

BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: No layout specified. Use the options' unknown type converter.
  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}

BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
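
// For example, the fully dynamic layout maps `tensor<?x4xf32>` to
// `memref<?x4xf32, strided<[?, ?], offset: ?>>`.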

/// Return a MemRef type with a static identity layout (i.e., no layout map).
/// If the given tensor type is unranked, return an unranked MemRef type.
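/// For example, `tensor<?x4xf32>` maps to `memref<?x4xf32>` and
/// `tensor<*xf32>` maps to `memref<*xf32>`.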
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}

//===----------------------------------------------------------------------===//
// Default implementations of interface methods
//===----------------------------------------------------------------------===//

bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the
  // OpResult may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
  // write. (Or: The reverse SSA use-def chain ends inside the region.) In
  // that case, the OpResult bufferizes to a memory write. E.g.:
  //
  //   %0 = "some_writing_op" : tensor<?xf32>
  //   %r = scf.if ... -> tensor<?xf32> {
  //     scf.yield %0 : tensor<?xf32>
  //   } else {
  //     %1 = "another_writing_op"(%0) : tensor<?xf32>
  //     scf.yield %1 : tensor<?xf32>
  //   }
  //   "some_reading_op"(%r)
  //
  // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
  // bufferizes to a memory write and the defining op is inside the scf.if.
  //
  // Note: This treatment of surrounding ops is useful for ops that have a
  // region but no OpOperand such as scf.if or scf.execute_region. It
  // simplifies the analysis considerably.
  //
  // "another_writing_op" in the above example should be able to bufferize
  // inplace in the absence of another read of %0. However, if the scf.if op
  // were not considered a "write", the analysis would detect the following
  // conflict:
  //
  // * read = some_reading_op
  // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
  // * conflictingWrite = %1
  //
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}

// Compute the AliasingOpOperandList for a given Value based on
// getAliasingValues.
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}

FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());
  auto elementType = tensorType.getElementType();

  if (!BaseMemRefType::isValidElementType(elementType))
    return getOwnerOfValue(value)->emitError()
           << "cannot bufferize value of type " << tensorType
           << ": element type " << elementType
           << " is not a valid memref element type";

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both OpResult and
    // OpOperand bufferize to the exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  auto memSpace =
      options.defaultMemorySpaceFn(cast<TensorType>(value.getType()));
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}

bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}

AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingOpOperandList r;
  for (OpOperand &operand : value.getDefiningOp()->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // TODO: Take into account successor blocks.
  // Unknown op: Conservatively assume that each OpResult may alias with every
  // OpOperand. In addition, each block argument of an entry block may alias
  // with every OpOperand.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}

bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}

FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}

bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}