//===- SCFInterfaceImpl.cpp - SCF Impl. of BufferizableOpInterface --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir::bufferization;

namespace mlir {
namespace linalg {
namespace comprehensive_bufferize {
namespace scf_ext {

// bufferization.to_memref is not allowed to change the rank.
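// For example (illustrative, not from the original source): bufferizing a
// tensor<?x?xf32> to a memref<?x?xf32> is fine, whereas bufferizing it to a
// memref<?xf32> would change the rank and trip the assertion below.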
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
  assert((!rankedTensorType || (memrefType.cast<MemRefType>().getRank() ==
                                rankedTensorType.getRank())) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

/// Bufferization of scf.execute_region. Can be analyzed, but bufferization is
/// not fully implemented at the moment.
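///
/// As an illustrative sketch (not part of the original source), bufferizing
///
///   %0 = scf.execute_region -> tensor<?xf32> {
///     ...
///     scf.yield %t : tensor<?xf32>
///   }
///
/// produces an scf.execute_region that yields a memref, with the terminator
/// operand wrapped in bufferization.to_memref and the result rewrapped in
/// bufferization.to_tensor for any remaining tensor users:
///
///   %m = scf.execute_region -> memref<?xf32> {
///     ...
///     %y = bufferization.to_memref %t : memref<?xf32>
///     scf.yield %y : memref<?xf32>
///   }
///   %0 = bufferization.to_tensor %m : memref<?xf32>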
struct ExecuteRegionOpInterface
    : public BufferizableOpInterface::ExternalModel<ExecuteRegionOpInterface,
                                                    scf::ExecuteRegionOp> {
  SmallVector<OpOperand *>
  getAliasingOpOperand(Operation *op, OpResult opResult,
                       const BufferizationState &state) const {
    // ExecuteRegionOps do not have tensor OpOperands. The yielded value can be
    // any SSA value that is in scope. To allow for use-def chain traversal
    // through ExecuteRegionOps in the analysis, the corresponding yield value
    // is considered to be aliasing with the result.
    auto executeRegionOp = cast<scf::ExecuteRegionOp>(op);
    size_t resultNum = std::distance(op->getOpResults().begin(),
                                     llvm::find(op->getOpResults(), opResult));
    // TODO: Support multiple blocks.
    assert(executeRegionOp.getRegion().getBlocks().size() == 1 &&
           "expected exactly 1 block");
    auto yieldOp = dyn_cast<scf::YieldOp>(
        executeRegionOp.getRegion().front().getTerminator());
    assert(yieldOp && "expected scf.yield terminator in scf.execute_region");
    return {&yieldOp->getOpOperand(resultNum)};
  }

  // TODO: For better bufferization results, this could return `true` only if
  // there is a memory write in the region.
  bool isMemoryWrite(Operation *op, OpResult opResult,
                     const BufferizationState &state) const {
    // Similar to scf.if, results of this op are always considered memory
    // writes in the analysis. This is a useful pattern for all ops that have
    // tensor OpResults but no tensor OpOperands. By default, `isMemoryWrite`
    // is implemented in terms of `bufferizesToMemoryWrite`, which does not
    // work on ops without OpOperands.
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationState &state) const {
    auto executeRegionOp = cast<scf::ExecuteRegionOp>(op);

    // Compute new result types.
    SmallVector<Type> newResultTypes;
    for (Type type : executeRegionOp->getResultTypes()) {
      if (auto tensorType = type.dyn_cast<TensorType>()) {
        newResultTypes.push_back(getMemRefType(tensorType, state.getOptions()));
      } else {
        newResultTypes.push_back(type);
      }
    }

    // Create new op and move over region.
    auto newOp =
        rewriter.create<scf::ExecuteRegionOp>(op->getLoc(), newResultTypes);
    newOp.getRegion().takeBody(executeRegionOp.getRegion());

    // Update terminator.
    assert(newOp.getRegion().getBlocks().size() == 1 &&
           "only 1 block supported");
    Block *newBlock = &newOp.getRegion().front();
    auto yieldOp = cast<scf::YieldOp>(newBlock->getTerminator());
    rewriter.setInsertionPoint(yieldOp);
    SmallVector<Value> newYieldValues;
    for (auto it : llvm::enumerate(yieldOp.getResults())) {
      Value val = it.value();
      if (val.getType().isa<TensorType>()) {
        newYieldValues.push_back(rewriter.create<bufferization::ToMemrefOp>(
            yieldOp.getLoc(), newResultTypes[it.index()], val));
      } else {
        newYieldValues.push_back(val);
      }
    }
    rewriter.replaceOpWithNewOp<scf::YieldOp>(yieldOp, newYieldValues);

    // Update all uses of the old op.
    rewriter.setInsertionPointAfter(newOp);
    SmallVector<Value> newResults;
    for (auto it : llvm::enumerate(executeRegionOp->getResultTypes())) {
      if (it.value().isa<TensorType>()) {
        newResults.push_back(rewriter.create<bufferization::ToTensorOp>(
            executeRegionOp.getLoc(), newOp->getResult(it.index())));
      } else {
        newResults.push_back(newOp->getResult(it.index()));
      }
    }

    // Replace old op.
    rewriter.replaceOp(executeRegionOp, newResults);

    return success();
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const BufferizationState &state) const {
    return BufferRelation::Equivalent;
  }
};

/// Bufferization of scf.if. Replace with a new scf.if that yields memrefs.
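///
/// As an illustrative sketch (not part of the original source),
///
///   %r = scf.if %c -> (tensor<?xf32>) {
///     scf.yield %t0 : tensor<?xf32>
///   } else {
///     scf.yield %t1 : tensor<?xf32>
///   }
///
/// is rewritten so that both branches yield memrefs, with
/// bufferization.to_memref ops inserted in front of the terminators:
///
///   %m = scf.if %c -> (memref<?xf32>) {
///     %m0 = bufferization.to_memref %t0 : memref<?xf32>
///     scf.yield %m0 : memref<?xf32>
///   } else {
///     %m1 = bufferization.to_memref %t1 : memref<?xf32>
///     scf.yield %m1 : memref<?xf32>
///   }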
struct IfOpInterface
    : public BufferizableOpInterface::ExternalModel<IfOpInterface, scf::IfOp> {
  SmallVector<OpOperand *>
  getAliasingOpOperand(Operation *op, OpResult opResult,
                       const BufferizationState &state) const {
    // IfOps do not have tensor OpOperands. The yielded value can be any SSA
    // value that is in scope. To allow for use-def chain traversal through
    // IfOps in the analysis, both corresponding yield values from the
    // then/else branches are considered to be aliasing with the result.
    auto ifOp = cast<scf::IfOp>(op);
    size_t resultNum = std::distance(op->getOpResults().begin(),
                                     llvm::find(op->getOpResults(), opResult));
    return {&ifOp.thenYield()->getOpOperand(resultNum),
            &ifOp.elseYield()->getOpOperand(resultNum)};
  }

  // TODO: For better bufferization results, this could return `true` only if
  // there is a memory write in one (or both) of the branches. Since this is
  // not allowed at the moment, we should never encounter scf.ifs that yield
  // unmodified tensors. Such scf.yield ops could just fold away.
  bool isMemoryWrite(Operation *op, OpResult opResult,
                     const BufferizationState &state) const {
    // IfOp results are always considered memory writes in the analysis. This
    // design decision simplifies the analysis considerably. E.g., consider the
    // following test case:
    //
    //   %0 = "some_writing_op" : tensor<?xf32>
    //   %r = scf.if %c -> (tensor<?xf32>) {
    //     scf.yield %0
    //   } else {
    //     %1 = "another_writing_op"(%0) : tensor<?xf32>
    //     scf.yield %1
    //   }
    //   "some_reading_op"(%r)
    //
    // "another_writing_op" in the above example should be able to bufferize
    // inplace in the absence of another read of %0. However, if the scf.if op
    // were not considered a "write", the analysis would detect the following
    // conflict:
    //
    //   * read = some_reading_op
    //   * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
    //   * conflictingWrite = %1
    //
    // For more details, check the "scf.IfOp" section of the design document.
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationState &state) const {
    auto ifOp = cast<scf::IfOp>(op);

    // Compute new types of the bufferized scf.if op.
    SmallVector<Type> newTypes;
    for (Type returnType : ifOp->getResultTypes()) {
      if (auto tensorType = returnType.dyn_cast<TensorType>()) {
        newTypes.push_back(getMemRefType(tensorType, state.getOptions()));
      } else {
        newTypes.push_back(returnType);
      }
    }

    // Create new op.
    auto newIfOp =
        rewriter.create<scf::IfOp>(ifOp.getLoc(), newTypes, ifOp.getCondition(),
                                   /*withElseRegion=*/true);

    // Remove terminators.
    if (!newIfOp.thenBlock()->empty()) {
      rewriter.eraseOp(newIfOp.thenBlock()->getTerminator());
      rewriter.eraseOp(newIfOp.elseBlock()->getTerminator());
    }

    // Move over then/else blocks.
    rewriter.mergeBlocks(ifOp.thenBlock(), newIfOp.thenBlock());
    rewriter.mergeBlocks(ifOp.elseBlock(), newIfOp.elseBlock());

    // Update scf.yield of new then-block.
    auto thenYieldOp = cast<scf::YieldOp>(newIfOp.thenBlock()->getTerminator());
    rewriter.setInsertionPoint(thenYieldOp);
    SmallVector<Value> thenYieldValues;
    for (OpOperand &operand : thenYieldOp->getOpOperands()) {
      if (operand.get().getType().isa<TensorType>()) {
        ensureToMemrefOpIsValid(operand.get(),
                                newTypes[operand.getOperandNumber()]);
        Value toMemrefOp = rewriter.create<bufferization::ToMemrefOp>(
            operand.get().getLoc(), newTypes[operand.getOperandNumber()],
            operand.get());
        operand.set(toMemrefOp);
      }
    }

    // Update scf.yield of new else-block.
    auto elseYieldOp = cast<scf::YieldOp>(newIfOp.elseBlock()->getTerminator());
    rewriter.setInsertionPoint(elseYieldOp);
    SmallVector<Value> elseYieldValues;
    for (OpOperand &operand : elseYieldOp->getOpOperands()) {
      if (operand.get().getType().isa<TensorType>()) {
        ensureToMemrefOpIsValid(operand.get(),
                                newTypes[operand.getOperandNumber()]);
        Value toMemrefOp = rewriter.create<bufferization::ToMemrefOp>(
            operand.get().getLoc(), newTypes[operand.getOperandNumber()],
            operand.get());
        operand.set(toMemrefOp);
      }
    }

    // Replace op results.
    replaceOpWithBufferizedValues(rewriter, op, newIfOp->getResults());

    return success();
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const BufferizationState &state) const {
    // IfOp results are equivalent to their corresponding yield values if both
    // yield values are equivalent to each other.
    auto bufferizableOp = cast<BufferizableOpInterface>(op);
    SmallVector<OpOperand *> yieldValues =
        bufferizableOp.getAliasingOpOperand(opResult, state);
    assert(yieldValues.size() == 2 && "expected 2 yield values");
    bool equivalentYields = state.areEquivalentBufferizedValues(
        yieldValues[0]->get(), yieldValues[1]->get());
    return equivalentYields ? BufferRelation::Equivalent : BufferRelation::None;
  }
};

/// Bufferization of scf.for. Replace with a new scf.for that operates on
/// memrefs.
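///
/// As an illustrative sketch (not part of the original source), a loop with a
/// tensor iter_arg such as
///
///   %r = scf.for %iv = %lb to %ub step %s iter_args(%t = %init)
///       -> (tensor<?xf32>) {
///     ...
///     scf.yield %t2 : tensor<?xf32>
///   }
///
/// is rewritten to carry the buffer of %init instead, with
/// bufferization.to_tensor / bufferization.to_memref ops inserted at the
/// region boundary:
///
///   %m = scf.for %iv = %lb to %ub step %s iter_args(%b = %buf)
///       -> (memref<?xf32>) {
///     %t = bufferization.to_tensor %b : memref<?xf32>
///     ...
///     %y = bufferization.to_memref %t2 : memref<?xf32>
///     scf.yield %y : memref<?xf32>
///   }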
struct ForOpInterface
    : public BufferizableOpInterface::ExternalModel<ForOpInterface,
                                                    scf::ForOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const BufferizationState &state) const {
    // An scf::ForOp alone doesn't bufferize to a memory read, but one of the
    // uses of its matching bbArg may.
    auto forOp = cast<scf::ForOp>(op);
    return state.isValueRead(forOp.getRegionIterArgForOpOperand(opOperand));
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const BufferizationState &state) const {
    // Tensor iter_args of scf::ForOps are always considered a write. This is
    // to simplify the analysis.
    // TODO: Consider doing something like isValueWritten.
    return true;
  }

  OpResult getAliasingOpResult(Operation *op, OpOperand &opOperand,
                               const BufferizationState &state) const {
    auto forOp = cast<scf::ForOp>(op);
    if (!opOperand.get().getType().isa<RankedTensorType>())
      return OpResult();
    return forOp.getResultForOpOperand(opOperand);
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const BufferizationState &state) const {
    // ForOp results are equivalent to their corresponding init_args if the
    // corresponding iter_args and yield values are equivalent.
    auto forOp = cast<scf::ForOp>(op);
    OpOperand &forOperand = forOp.getOpOperandForResult(opResult);
    auto bbArg = forOp.getRegionIterArgForOpOperand(forOperand);
    auto yieldOp = cast<scf::YieldOp>(&forOp.getLoopBody().front().back());
    bool equivalentYield = state.areEquivalentBufferizedValues(
        bbArg, yieldOp->getOperand(opResult.getResultNumber()));
    return equivalentYield ? BufferRelation::Equivalent : BufferRelation::None;
  }

  bool isWritable(Operation *op, Value value,
                  const BufferizationState &state) const {
    // Interestingly, scf::ForOp's bbArgs can **always** be viewed as
    // bufferizing inplace from the perspective of the ops nested under them:
    //   1. Either the matching iter operand is not bufferized inplace and an
    //      alloc + optional copy makes the bbArg itself inplaceable.
    //   2. Or the matching iter operand is bufferized inplace and the bbArg
    //      just bufferizes to that too.
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationState &state) const {
    auto forOp = cast<scf::ForOp>(op);
    Block *oldLoopBody = &forOp.getLoopBody().front();

    // Indices of all iter_args that have tensor type. These are the ones that
    // are bufferized.
    DenseSet<int64_t> indices;
    for (const auto &it : llvm::enumerate(forOp.getInitArgs()))
      if (it.value().getType().isa<TensorType>())
        indices.insert(it.index());

    // Given a range of values, apply `func` to those marked in `indices`.
    // Otherwise, store the unmodified value in the result vector.
    auto convert = [&](ValueRange values,
                       llvm::function_ref<Value(Value, int64_t)> func) {
      SmallVector<Value> result;
      for (const auto &it : llvm::enumerate(values)) {
        size_t idx = it.index();
        Value val = it.value();
        result.push_back(indices.contains(idx) ? func(val, idx) : val);
      }
      return result;
    };

    // Construct a new scf.for op with memref instead of tensor values.
    SmallVector<Value> initArgs;
    for (OpOperand &opOperand : forOp.getIterOpOperands()) {
      if (opOperand.get().getType().isa<TensorType>()) {
        FailureOr<Value> resultBuffer = state.getBuffer(rewriter, opOperand);
        if (failed(resultBuffer))
          return failure();
        initArgs.push_back(*resultBuffer);
      } else {
        initArgs.push_back(opOperand.get());
      }
    }
    auto newForOp = rewriter.create<scf::ForOp>(
        forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(),
        forOp.getStep(), initArgs);
    Block *loopBody = &newForOp.getLoopBody().front();

    // Set up new iter_args. The loop body uses tensors, so wrap the (memref)
    // iter_args of the new loop in ToTensorOps.
    rewriter.setInsertionPointToStart(loopBody);
    SmallVector<Value> iterArgs =
        convert(newForOp.getRegionIterArgs(), [&](Value val, int64_t index) {
          return rewriter.create<bufferization::ToTensorOp>(val.getLoc(), val);
        });
    iterArgs.insert(iterArgs.begin(), newForOp.getInductionVar());

    // Erase terminator if present.
    if (iterArgs.size() == 1)
      rewriter.eraseOp(loopBody->getTerminator());

    // Move loop body to new loop.
    rewriter.mergeBlocks(oldLoopBody, loopBody, iterArgs);

    // Update scf.yield of new loop.
    auto yieldOp = cast<scf::YieldOp>(loopBody->getTerminator());
    rewriter.setInsertionPoint(yieldOp);
    SmallVector<Value> yieldValues =
        convert(yieldOp.getResults(), [&](Value val, int64_t index) {
          ensureToMemrefOpIsValid(val, initArgs[index].getType());
          return rewriter.create<bufferization::ToMemrefOp>(
              val.getLoc(), initArgs[index].getType(), val);
        });
    yieldOp.getResultsMutable().assign(yieldValues);

    // Replace loop results.
    replaceOpWithBufferizedValues(rewriter, op, newForOp->getResults());

    return success();
  }
};

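// As an illustrative sketch (not part of the original source), the check below
// rejects loops whose yielded value does not bufferize to the same buffer as
// the corresponding iter_arg, e.g.:
//
//   %r = scf.for ... iter_args(%t = %init) -> (tensor<?xf32>) {
//     %u = "some_op"() : () -> tensor<?xf32>   // unrelated buffer
//     scf.yield %u : tensor<?xf32>
//   }
//
// Such loops would require buffer copies (or swaps), so an error is emitted
// instead.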
LogicalResult AssertScfForAliasingProperties::run(
    Operation *op, BufferizationState &state,
    BufferizationAliasInfo &aliasInfo, SmallVector<Operation *> &newOps) {
  LogicalResult status = success();

  op->walk([&](scf::ForOp forOp) {
    auto yieldOp =
        cast<scf::YieldOp>(forOp.getLoopBody().front().getTerminator());
    for (OpOperand &operand : yieldOp->getOpOperands()) {
      auto tensorType = operand.get().getType().dyn_cast<TensorType>();
      if (!tensorType)
        continue;

      OpOperand &forOperand = forOp.getOpOperandForResult(
          forOp->getResult(operand.getOperandNumber()));
      auto bbArg = forOp.getRegionIterArgForOpOperand(forOperand);
      // Note: This is overly strict. We should check for aliasing bufferized
      // values. But we don't have a "must-alias" analysis yet.
      if (!aliasInfo.areEquivalentBufferizedValues(operand.get(), bbArg)) {
        // TODO: This could get resolved with copies, but it can also turn into
        // swaps, so we need to be careful about the order of copies.
        status =
            yieldOp->emitError()
            << "Yield operand #" << operand.getOperandNumber()
            << " does not bufferize to a buffer that is aliasing the matching"
            << " enclosing scf::for operand";
        return WalkResult::interrupt();
      }
    }
    return WalkResult::advance();
  });

  return status;
}

/// Bufferization of scf.yield. Bufferized as part of their enclosing ops, so
/// this is for analysis only.
struct YieldOpInterface
    : public BufferizableOpInterface::ExternalModel<YieldOpInterface,
                                                    scf::YieldOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const BufferizationState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const BufferizationState &state) const {
    return false;
  }

  OpResult getAliasingOpResult(Operation *op, OpOperand &opOperand,
                               const BufferizationState &state) const {
    if (isa<scf::IfOp>(op->getParentOp()))
      return op->getParentOp()->getResult(opOperand.getOperandNumber());
    if (isa<scf::ExecuteRegionOp>(op->getParentOp()))
      return op->getParentOp()->getResult(opOperand.getOperandNumber());
    return OpResult();
  }

  bool mustBufferizeInPlace(Operation *op, OpOperand &opOperand,
                            const BufferizationState &state) const {
    // Yield operands always bufferize inplace. Otherwise, an alloc + copy
    // may be generated inside the block. We should avoid returning/yielding
    // allocations whenever possible.
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationState &state) const {
    auto yieldOp = cast<scf::YieldOp>(op);
    if (!isa<scf::ExecuteRegionOp, scf::IfOp, scf::ForOp>(
            yieldOp->getParentOp()))
      return yieldOp->emitError("unsupported scf::YieldOp parent");
    return success();
  }
};

} // namespace scf_ext
} // namespace comprehensive_bufferize
} // namespace linalg
} // namespace mlir

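// Illustrative usage sketch (assumed caller, not part of this file): the
// registration function below is typically invoked while assembling the
// DialectRegistry of a pass pipeline or tool, e.g.:
//
//   DialectRegistry registry;
//   registry.insert<scf::SCFDialect, bufferization::BufferizationDialect>();
//   linalg::comprehensive_bufferize::scf_ext::
//       registerBufferizableOpInterfaceExternalModels(registry);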
void mlir::linalg::comprehensive_bufferize::scf_ext::
    registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
  registry.addOpInterface<scf::ExecuteRegionOp,
                          scf_ext::ExecuteRegionOpInterface>();
  registry.addOpInterface<scf::ForOp, scf_ext::ForOpInterface>();
  registry.addOpInterface<scf::IfOp, scf_ext::IfOpInterface>();
  registry.addOpInterface<scf::YieldOp, scf_ext::YieldOpInterface>();
  registry.addOpInterface<scf::ParallelOp,
                          AllocationHoistingBarrierOnly<scf::ParallelOp>>();
}