22 #include "llvm/ADT/ScopeExit.h"
23 #include "llvm/Support/Debug.h"
30 namespace bufferization {
32 #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
39 #define DEBUG_TYPE "bufferizable-op-interface"
40 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
41 #define LDBG(X) LLVM_DEBUG(DBGS() << (X))
44 using namespace bufferization;
49 if (
auto bufferizableOp =
options.dynCastBufferizableOp(op))
59 if (
auto iter = enclosingRepetitiveRegionCache.find_as(op);
60 iter != enclosingRepetitiveRegionCache.end())
62 return enclosingRepetitiveRegionCache[op] =
68 if (
auto iter = enclosingRepetitiveRegionCache.find_as(value);
69 iter != enclosingRepetitiveRegionCache.end())
77 visitedRegions.push_back(region);
82 enclosingRepetitiveRegionCache[value] = region;
83 for (
Region *r : visitedRegions)
84 enclosingRepetitiveRegionCache[r] = region;
90 if (
auto iter = enclosingRepetitiveRegionCache.find_as(block);
91 iter != enclosingRepetitiveRegionCache.end())
105 enclosingRepetitiveRegionCache[block] = region;
106 for (
Region *r : visitedRegions)
107 enclosingRepetitiveRegionCache[r] = region;
127 if (bufferizableOp &&
130 "expected that all parallel regions are also repetitive regions");
139 if (
auto opResult = llvm::dyn_cast<OpResult>(value))
140 return opResult.getDefiningOp();
141 return llvm::cast<BlockArgument>(value).getOwner()->
getParentOp();
151 if (llvm::isa<RankedTensorType>(shapedValue.
getType())) {
152 tensor = shapedValue;
153 }
else if (llvm::isa<MemRefType>(shapedValue.
getType())) {
154 tensor = b.
create<ToTensorOp>(loc, shapedValue);
155 }
else if (llvm::isa<UnrankedTensorType>(shapedValue.
getType()) ||
156 llvm::isa<UnrankedMemRefType>(shapedValue.
getType())) {
158 ->
emitError(
"copying of unranked tensors is not implemented");
160 llvm_unreachable(
"expected RankedTensorType or MemRefType");
162 RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.
getType());
167 bool reifiedShapes =
false;
168 if (llvm::isa<RankedTensorType>(shapedValue.
getType()) &&
169 llvm::isa<OpResult>(shapedValue)) {
173 reifiedShapes =
true;
175 resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
176 for (
const auto &dim :
enumerate(tensorType.getShape()))
177 if (ShapedType::isDynamic(dim.value()))
178 dynamicSizes.push_back(shape[dim.index()].get<
Value>());
188 auto allocTensorOp = b.
create<AllocTensorOp>(loc, tensorType, dynamicSizes,
195 if (failed(copyBufferType))
197 std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
199 memorySpace =
options.defaultMemorySpaceFn(tensorType);
200 if (memorySpace.has_value())
201 allocTensorOp.setMemorySpaceAttr(memorySpace.value());
202 return allocTensorOp.getResult();
205 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
216 Type operandType = opOperand.get().getType();
217 if (!llvm::isa<TensorType>(operandType))
219 if (state.isInPlace(opOperand))
221 if (llvm::isa<UnrankedTensorType>(operandType))
222 return op->
emitError(
"copying of unranked tensors is not implemented");
226 isa<OpResult>(aliasingValues.
getAliases()[0].value) &&
227 !state.bufferizesToMemoryWrite(opOperand) &&
228 state.getAliasingOpOperands(aliasingValues.
getAliases()[0].value)
229 .getNumAliases() == 1 &&
230 !isa<UnrankedTensorType>(
231 aliasingValues.
getAliases()[0].value.getType())) {
239 outOfPlaceValues.push_back(value);
240 if (!state.canOmitTensorCopy(opOperand))
241 copiedOpValues.insert(value);
244 outOfPlaceOpOperands.push_back(&opOperand);
245 if (!state.canOmitTensorCopy(opOperand))
246 copiedOpOperands.insert(&opOperand);
252 for (
OpOperand *opOperand : outOfPlaceOpOperands) {
254 rewriter, op->
getLoc(), opOperand->get(), state.getOptions(),
255 copiedOpOperands.contains(opOperand));
263 for (
Value value : outOfPlaceValues) {
265 rewriter, op->
getLoc(), value, state.getOptions(),
266 copiedOpValues.count(value));
273 if (use->getOwner() ==
copy->getDefiningOp())
277 if (isa<tensor::DimOp>(use->getOwner()))
292 bool isAllowed = !hasAllowRule();
293 for (
const Entry &entry : entries) {
294 bool filterResult = entry.fn(op);
295 switch (entry.type) {
297 isAllowed |= filterResult;
318 FunctionOpInterface funcOp,
327 llvm::cast<TensorType>(value.
getType()), memorySpace);
334 : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
335 unknownTypeConverterFn(defaultUnknownTypeConverter) {}
340 bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->
getDialect());
347 BufferizableOpInterface
351 auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
354 return bufferizableOp;
357 BufferizableOpInterface
363 LayoutMapOption layoutMapOption) {
365 FunctionOpInterface funcOp,
367 if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
374 layoutMapOption == LayoutMapOption::InferLayoutMap;
382 if (
auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
393 if (
auto bufferizableOp =
getOptions().dynCastBufferizableOp(op))
394 return bufferizableOp.getAliasingOpOperands(value, *
this);
403 if (
auto bufferizableOp =
405 return bufferizableOp.getAliasingValues(opOperand, *
this);
414 if (
auto bufferizableOp =
416 return bufferizableOp.bufferizesToMemoryRead(opOperand, *
this);
426 if (
auto bufferizableOp =
428 return bufferizableOp.bufferizesToMemoryWrite(opOperand, *
this);
438 if (
auto bufferizableOp =
440 return bufferizableOp.bufferizesToAliasOnly(opOperand, *
this);
448 auto opResult = llvm::dyn_cast<OpResult>(value);
454 return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *
this);
461 assert(llvm::isa<TensorType>(value.
getType()) &&
"expected TensorType");
465 workingSet.push_back(&use);
467 while (!workingSet.empty()) {
468 OpOperand *uMaybeReading = workingSet.pop_back_val();
469 if (!visited.insert(uMaybeReading).second)
475 for (
OpOperand &use : alias.value.getUses())
476 workingSet.push_back(&use);
493 workingSet.insert(value);
495 while (!workingSet.empty()) {
496 Value value = workingSet.pop_back_val();
501 result.insert(value);
504 visited.insert(value);
506 if (condition(value)) {
507 result.insert(value);
515 result.insert(value);
524 result.insert(value);
534 result.insert(value);
542 result.insert(value);
547 a.opOperand->get().getType() != value.
getType() &&
552 result.insert(value);
556 workingSet.insert(a.opOperand->get());
594 llvm::none_of(aliases,
604 if (isa<ToMemrefOp>(opOperand.
getOwner()))
632 auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.
getType());
633 assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
634 rankedTensorType.getRank()) &&
635 "to_memref would be invalid: mismatching ranks");
642 auto tensorType = llvm::dyn_cast<TensorType>(value.
getType());
643 assert(tensorType &&
"unexpected non-tensor type");
647 if (
auto toTensorOp = value.
getDefiningOp<bufferization::ToTensorOp>())
648 return toTensorOp.getMemref();
654 if (failed(memrefType))
658 .
create<bufferization::ToMemrefOp>(value.
getLoc(), *memrefType, value)
663 FailureOr<BaseMemRefType>
670 FailureOr<BaseMemRefType>
673 assert(llvm::isa<TensorType>(value.
getType()) &&
674 "unexpected non-tensor type");
675 invocationStack.push_back(value);
677 llvm::make_scope_exit([&]() { invocationStack.pop_back(); });
681 auto bufferizableOp =
options.dynCastBufferizableOp(op);
683 return bufferizableOp.getBufferType(value,
options, invocationStack);
688 if (!memSpace.has_value())
689 return op->
emitError(
"could not infer memory space");
695 if (
auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
696 return bufferizableOp.hasTensorSemantics();
704 "expected one value per OpResult");
710 Value replacement = values[opResult.getResultNumber()];
711 if (llvm::isa<TensorType>(opResult.getType())) {
714 assert((llvm::isa<MemRefType>(replacement.
getType()) ||
715 llvm::isa<UnrankedMemRefType>(replacement.
getType())) &&
716 "tensor op result should be replaced with a memref value");
721 replacement = rewriter.
create<bufferization::ToTensorOp>(
722 replacement.
getLoc(), replacement);
724 replacements.push_back(replacement);
744 .
create<memref::AllocOp>(loc, type, dynShape,
747 return b.
create<memref::AllocOp>(loc, type, dynShape).getResult();
754 return (*
memCpyFn)(b, loc, from, to);
756 b.
create<memref::CopyOp>(loc, from, to);
766 MemRefLayoutAttrInterface layout,
768 auto tensorType = llvm::cast<TensorType>(value.
getType());
771 if (
auto unrankedTensorType =
772 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
773 assert(!layout &&
"UnrankedTensorType cannot have a layout map");
779 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
782 rankedTensorType.getElementType(), layout,
786 return options.unknownTypeConverterFn(value, memorySpace,
options);
793 if (
auto unrankedTensorType =
794 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
800 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
801 int64_t dynamicOffset = ShapedType::kDynamic;
803 ShapedType::kDynamic);
805 dynamicOffset, dynamicStrides);
807 rankedTensorType.getElementType(), stridedLayout,
817 if (
auto unrankedTensorType =
818 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
824 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
825 MemRefLayoutAttrInterface layout = {};
827 rankedTensorType.getElementType(), layout,
837 auto bufferizableOp = cast<BufferizableOpInterface>(opResult.
getDefiningOp());
839 bufferizableOp.getAliasingOpOperands(opResult, state);
849 return state.bufferizesToMemoryWrite(*alias.
opOperand);
882 auto isMemoryWriteInsideOp = [&](
Value v) {
886 return state.bufferizesToMemoryWrite(v);
893 isMemoryWriteInsideOp, config)
907 if (!llvm::isa<TensorType>(opOperand.get().getType()))
910 for (
const auto &it : aliasingValues)
911 if (it.value == value)
912 result.emplace_back(&opOperand, it.relation, it.isDefinite);
920 assert(llvm::isa<TensorType>(value.
getType()) &&
"expected tensor type");
923 if (llvm::isa<BlockArgument>(value))
928 auto opResult = llvm::cast<OpResult>(value);
935 Value equivalentOperand = aliases.
getAliases().front().opOperand->get();
943 if (!memSpace.has_value())
944 return op->
emitError(
"could not infer memory space");
950 BufferizableOpInterface bufferizableOp,
unsigned index) {
951 assert(index < bufferizableOp->getNumRegions() &&
"invalid region index");
952 auto regionInterface =
953 dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
954 if (!regionInterface)
956 return regionInterface.isRepetitiveRegion(index);
963 if (
auto bbArg = dyn_cast<BlockArgument>(value))
964 if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
972 if (isa<TensorType>(operand.get().getType()))
985 if (llvm::isa<TensorType>(result.getType()))
988 if (!region.getBlocks().empty())
989 for (
BlockArgument bbArg : region.getBlocks().front().getArguments())
990 if (isa<TensorType>(bbArg.getType()))
998 return any_of(r.getBlocks(), [&](Block &b) {
999 return any_of(b.getArguments(), [&](BlockArgument bbArg) {
1000 return isaTensor(bbArg.getType());
1004 if (hasTensorBlockArgument)
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType)
static void setInsertionPointAfter(OpBuilder &b, Value value)
static bool isRepetitiveRegion(Region *region, const BufferizationOptions &options)
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static bool isaTensor(Type t)
static llvm::ManagedStatic< PassManagerOptions > options
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
Base class for generic analysis states.
Attributes are known-constant values of operations.
This class provides a shared interface for ranked and unranked memref types.
This class represents an argument of a Block.
Block represents an ordered list of Operations.
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
IntegerAttr getI64IntegerAttr(int64_t value)
IRValueT get() const
Return the current value being used by this operand.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around a LocationAttr.
RAII guard to reset the insertion point of the builder when destroyed.
This class helps build Operations.
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent insertions to go right after it.
This class represents an operand of an operation.
This is a value defined by a result of an operation.
Operation is the basic unit of execution within MLIR.
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loaded.
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Location getLoc()
The source location the operation was defined or derived from.
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation, or nullptr if this is a top-level operation.
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers that may be listening.
Block * getBlock()
Returns the operation block that contains this operation.
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
operand_type_range getOperandTypes()
MutableArrayRef< OpOperand > getOpOperands()
result_type_range getResultTypes()
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
result_range getOpResults()
Region * getParentRegion()
Returns the region to which the instruction belongs.
unsigned getNumResults()
Return the number of results held by this operation.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Region * getParentRegion()
Return the region containing this region, or nullptr if the region is attached to a top-level operation.
unsigned getRegionNumber()
Return the number of this region in the parent operation.
Operation * getParentOp()
Return the parent operation this region is attached to.
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to track mutations and create new operations.
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements).
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and UnrankedTensorType.
This class provides an efficient unique identifier for a specific C++ type.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value that has a type and a set of users.
Type getType() const
Return the type of this value.
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Location getLoc() const
Return the location of this value.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Region * getParentRegion()
Return the Region in which this Value is defined.
size_t getNumAliases() const
ArrayRef< T > getAliases() const
AnalysisState provides a variety of helper functions for dealing with tensor values.
bool isValueRead(Value value) const
Return true if the given value is read by an op that bufferizes to a memory read.
AliasingValueList getAliasingValues(OpOperand &opOperand) const
Determine which Value will alias with opOperand if the op is bufferized in place.
virtual bool areAliasingBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 may bufferize to aliasing buffers.
virtual bool hasUndefinedContents(OpOperand *opOperand) const
Return true if the given tensor has undefined contents.
bool canOmitTensorCopy(OpOperand &opOperand) const
Return true if a copy can always be avoided when allocating a new tensor for the given OpOperand.
bool bufferizesToMemoryWrite(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory write.
virtual bool isInPlace(OpOperand &opOperand) const
Return true if the given OpResult has been decided to bufferize inplace.
bool bufferizesToAliasOnly(OpOperand &opOperand) const
Return true if opOperand does neither read nor write but bufferizes to an alias.
AliasingOpOperandList getAliasingOpOperands(Value value) const
Determine which OpOperand* will alias with value if the op is bufferized in place.
AnalysisState(const BufferizationOptions &options)
Region * getEnclosingRepetitiveRegion(Operation *op, const BufferizationOptions &options)
Return the closest enclosing repetitive region around the given op.
const BufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
bool bufferizesToMemoryRead(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory read.
SetVector< Value > findValueInReverseUseDefChain(Value value, llvm::function_ref< bool(Value)> condition, TraversalConfig config=TraversalConfig()) const
Starting from value, follow the use-def chain in reverse, always selecting the aliasing OpOperands.
SetVector< Value > findDefinitions(Value value) const
Find the values that may define the contents of the given value at runtime.
virtual bool areEquivalentBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 bufferize to equivalent buffers.
virtual void resetCache()
bool isOpAllowed(Operation *op) const
Return whether the op is allowed or not.
Operation * getOwner() const
Return the owner of this operand.
AliasingOpOperandList defaultGetAliasingOpOperands(Value value, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::getAliasingOpOperands.
bool defaultResultBufferizesToMemoryWrite(OpResult opResult, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::resultBufferizesToMemoryWrite.
AliasingValueList unknownGetAliasingValues(OpOperand &opOperand)
This is the default implementation of getAliasingValues in case the owner op does not implement the BufferizableOpInterface.
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp, unsigned index)
This is the default implementation of BufferizableOpInterface::isRepetitiveRegion.
AliasingOpOperandList unknownGetAliasingOpOperands(Value value)
This is the default implementation of getAliasingOpOperands in case the defining op does not implement the BufferizableOpInterface.
bool defaultHasTensorSemantics(Operation *op)
This is the default implementation of BufferizableOpInterface::hasTensorSemantics.
FailureOr< BaseMemRefType > defaultGetBufferType(Value value, const BufferizationOptions &options, SmallVector< Value > &invocationStack)
This is the default implementation of BufferizableOpInterface::getBufferType.
void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, ValueRange values)
Replace an op with replacement values.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
Operation * getOwnerOfValue(Value value)
Return the owner of the given value.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, MemRefLayoutAttrInterface layout={}, Attribute memorySpace=nullptr)
Return a MemRefType to which the type of the given value can be bufferized.
Region * getParallelRegion(Region *region, const BufferizationOptions &options)
If region is a parallel region, return region.
Region * getNextEnclosingRepetitiveRegion(Region *region, const BufferizationOptions &options)
Assuming that the given region is repetitive, find the next enclosing repetitive region.
AliasList< AliasingOpOperand > AliasingOpOperandList
A list of possible aliasing OpOperands.
FailureOr< Value > allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, const BufferizationOptions &options, bool copy=true)
Create an AllocTensorOp for the given shaped value (memref or tensor).
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
FailureOr< Value > getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options)
Lookup the buffer for the given value.
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the given shaped value.
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Include the generated interface declarations.
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
A maybe aliasing OpOperand.
Options for BufferizableOpInterface-based bufferization.
std::function< void(AnalysisState &)> AnalysisStateInitFn
Initializer function for analysis state.
void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption)
This function controls buffer types on function signatures.
BufferizableOpInterface dynCastBufferizableOp(Operation *op) const
Try to cast the given op to BufferizableOpInterface if the op is allow listed.
bool inferFunctionResultLayout
If true, function result types are inferred from the body of the function.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
FunctionArgTypeConverterFn functionArgTypeConverterFn
Type converter from tensors to memrefs.
std::optional< AllocationFn > allocationFn
Helper functions for allocation and memory copying.
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
bool isOpAllowed(Operation *op) const
Return true if the given op should be bufferized.
std::optional< MemCpyFn > memCpyFn
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
FailureOr< Value > createAlloc(OpBuilder &b, Location loc, MemRefType type, ValueRange dynShape) const
Create a memref allocation with the given type and dynamic extents.
LogicalResult createMemCpy(OpBuilder &b, Location loc, Value from, Value to) const
Creates a memcpy between two given buffers.
SmallVector< AnalysisStateInitFn > stateInitializers
Initializer functions for analysis state.
Traversal parameters for findValueInReverseUseDefChain.
bool followUnknownOps
Specifies whether unknown/non-bufferizable ops not included in the OpFilter of BufferizationOptions should be followed.
bool alwaysIncludeLeaves
Specifies if leaves (that do not have further OpOperands to follow) should be returned even if they do not match the specified filter condition.
bool followSameTypeOrCastsOnly
Specifies whether OpOperands with a different type that are not the result of a CastOpInterface op should be followed.
bool followInPlaceOnly
Specifies whether out-of-place/undecided OpOperands should be followed.
bool followEquivalentOnly
Specifies whether non-equivalent OpOperands should be followed.
bool revisitAlreadyVisitedValues
Specifies whether already visited values should be visited again.