//===- Detensorize.cpp - Linalg transformations as patterns ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Passes.h"

#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include <utility>

namespace mlir {
#define GEN_PASS_DEF_LINALGDETENSORIZEPASS
#include "mlir/Dialect/Linalg/Passes.h.inc"
} // namespace mlir

using namespace mlir;
using namespace mlir::linalg;

static Value sourceMaterializationCallback(OpBuilder &builder, Type type,
                                           ValueRange inputs, Location loc) {
  assert(inputs.size() == 1);
  auto inputType = inputs[0].getType();
  if (isa<TensorType>(inputType))
    return nullptr;

  // A detensored value is converted back by creating a new tensor from its
  // element(s).
  return builder.create<tensor::FromElementsOp>(
      loc, RankedTensorType::get({}, inputType), inputs[0]);
}
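
// For illustration, rematerializing a detensored scalar %e : i32 back into
// tensor form yields IR along these lines (an illustrative snippet, not
// emitted verbatim by this callback):
//   %t = tensor.from_elements %e : tensor<i32>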

namespace {
/// Defines the criteria a TensorType must follow in order to be considered
/// "detensorable".
///
/// NOTE: For now, only 0-D tensors are supported.
///
/// Returns true if tensorType can be detensored.
bool canBeDetensored(TensorType tensorType) {
  return tensorType.hasRank() && tensorType.getRank() == 0;
}
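
// For example: tensor<i32> and tensor<f32> can be detensored, while
// tensor<4xi32> (non-zero rank) and tensor<*xf32> (unranked) cannot.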

bool shouldBeDetensored(Operation *op, TypeConverter typeConverter) {
  GenericOp genericOp = dyn_cast_or_null<GenericOp>(op);
  return genericOp &&
         llvm::all_of(genericOp->getOpOperands(), [&](OpOperand &opOperand) {
           return !typeConverter.isLegal(opOperand.get().getType());
         });
}
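
// In other words, a linalg.generic qualifies only if every one of its operand
// types is illegal (i.e. convertible) for the type converter. Illustratively,
// a generic whose ins/outs are all tensor<i32> qualifies; one that also takes
// a tensor<4xi32> operand does not.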

/// A conversion pattern for detensoring `linalg.generic` ops.
class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(GenericOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Block *originalBlock = op->getBlock();

    // Gather some information about the op before inlining its region.
    Block *opEntryBlock = &*op.getRegion().begin();
    YieldOp yieldOp = dyn_cast<YieldOp>(op.getRegion().back().getTerminator());

    // Split the op's block before the op. This way, we have a clear insertion
    // point in which the op's region can be inlined.
    Block *newBlock = rewriter.splitBlock(originalBlock, Block::iterator(op));
    rewriter.inlineRegionBefore(op.getRegion(), newBlock);
    // Now that the op's region is inlined, the operands of its YieldOp are
    // mapped to the materialized target values. Therefore, we can replace the
    // op's uses with its YieldOp's operands.
    rewriter.replaceOp(op, yieldOp->getOperands());

    // No need for these intermediate blocks; merge them into one.
    rewriter.mergeBlocks(opEntryBlock, originalBlock, adaptor.getOperands());
    rewriter.mergeBlocks(newBlock, originalBlock, {});

    rewriter.eraseOp(&*Block::iterator(yieldOp));

    return success();
  }
};
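
// As a rough sketch of what the pattern above does (illustrative IR,
// simplified), a 0-D addition such as
//
//   %out = tensor.empty() : tensor<i32>
//   %sum = linalg.generic #attrs
//            ins(%a, %b : tensor<i32>, tensor<i32>)
//            outs(%out : tensor<i32>) {
//          ^bb0(%lhs: i32, %rhs: i32, %acc: i32):
//            %r = arith.addi %lhs, %rhs : i32
//            linalg.yield %r : i32
//          } -> tensor<i32>
//
// ends up with its body inlined into the enclosing block, operating directly
// on the detensored (adaptor) scalars, roughly:
//
//   %r = arith.addi %a_scalar, %b_scalar : i32
//
// where %a_scalar/%b_scalar stand for the materialized scalar forms of %a/%b.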

/// A conversion pattern for detensoring internal (non-entry) blocks within a
/// function.
struct FunctionNonEntryBlockConversion
    : public OpInterfaceConversionPattern<FunctionOpInterface> {
  FunctionNonEntryBlockConversion(MLIRContext *ctx, TypeConverter &converter,
                                  DenseSet<BlockArgument> blockArgsToDetensor)
      : OpInterfaceConversionPattern(converter, ctx),
        blockArgsToDetensor(std::move(blockArgsToDetensor)) {}

  LogicalResult
  matchAndRewrite(FunctionOpInterface op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.startOpModification(op);
    Region &region = op.getFunctionBody();

    for (Block &block :
         llvm::make_early_inc_range(llvm::drop_begin(region, 1))) {
      TypeConverter::SignatureConversion conversion(
          /*numOrigInputs=*/block.getNumArguments());

      for (BlockArgument blockArgument : block.getArguments()) {
        int idx = blockArgument.getArgNumber();

        if (blockArgsToDetensor.count(blockArgument))
          conversion.addInputs(idx, {getTypeConverter()->convertType(
                                        block.getArgumentTypes()[idx])});
        else
          conversion.addInputs(idx, {block.getArgumentTypes()[idx]});
      }

      rewriter.applySignatureConversion(&block, conversion, getTypeConverter());
    }

    rewriter.finalizeOpModification(op);
    return success();
  }

private:
  const DenseSet<BlockArgument> blockArgsToDetensor;
};
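
// Sketch of the effect of the pattern above on a non-entry block
// (illustrative): if ^bb1's first argument was selected for detensoring, its
// signature changes from
//   ^bb1(%arg: tensor<i32>, %other: tensor<4xf32>):
// to
//   ^bb1(%arg: i32, %other: tensor<4xf32>):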

class DetensorizeTypeConverter : public TypeConverter {
public:
  DetensorizeTypeConverter() {
    addConversion([](Type type) { return type; });

    // A TensorType that can be detensored is converted to its underlying
    // element type.
    addConversion([](TensorType tensorType) -> Type {
      if (canBeDetensored(tensorType))
        return tensorType.getElementType();

      return tensorType;
    });

    // A tensor value is detensored by extracting its element(s).
    addTargetMaterialization([](OpBuilder &builder, Type type,
                                ValueRange inputs, Location loc) -> Value {
      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
    });

    addSourceMaterialization(sourceMaterializationCallback);
  }
};
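
// In summary (illustrative): the converter maps tensor<i32> to i32 and leaves
// e.g. tensor<4xi32> unchanged, while the target materialization extracts the
// scalar from a 0-D tensor, roughly:
//   %scalar = tensor.extract %t[] : tensor<i32>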

/// @see LinalgDetensorize in Linalg/Passes.td for more details.
struct LinalgDetensorize
    : public impl::LinalgDetensorizePassBase<LinalgDetensorize> {
  using impl::LinalgDetensorizePassBase<
      LinalgDetensorize>::LinalgDetensorizePassBase;
  LinalgDetensorize() = default;

  class CostModel {
  public:
    virtual ~CostModel() = default;

    /// A cost model algorithm computes the following outputs:
    ///
    /// - opsToDetensor: the list of linalg ops that should be
    ///   detensored.
    ///
    /// - blockArgsToDetensor: since the operands and results of detensored
    ///   linalg ops can cross the BB boundary (e.g. a linalg op's input can
    ///   come from a BB argument and a linalg op's output can be passed to
    ///   successor BBs), we need to maintain the sub-set of arguments that
    ///   should be detensored (i.e. converted by typeConverter) for each
    ///   affected BB.
    ///
    /// Example:
    ///
    /// For the following snippet:
    /// ...
    /// ^bb1(%6: tensor<i32>, %9: tensor<i32>):
    ///   %7 = tensor.empty() : tensor<i32>
    ///   %8 = linalg.generic #attrs
    ///     ins(%6, %6 : tensor<i32>, tensor<i32>)
    ///     outs(%7 : tensor<i32>) {
    ///     ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
    ///       %9 = arith.addi %arg0, %arg1 : i32
    ///       linalg.yield %9 : i32
    ///   } -> tensor<i32>
    ///   %10 = "some.op"(%9)
    ///   br ^bb2(%8 : tensor<i32>)
    /// ...
    ///
    /// if the cost model decides that the linalg.generic op should be
    /// detensored, then:
    /// - opsToDetensor should be = {linalg.generic{add}}.
    /// - blockArgsToDetensor should be = {bb1 -> {0}, bb2 -> {0}}.
    virtual void compute(FunctionOpInterface func,
                         DetensorizeTypeConverter typeConverter,
                         DenseSet<Operation *> &opsToDetensor,
                         DenseSet<BlockArgument> &blockArgsToDetensor) = 0;

    /// From the blockArgsToDetensor set computed by a CostModel
    /// implementation, this method computes the corresponding branch op
    /// detensoring. The result is a map from a branch op to a subset of
    /// indices of its operands. The indices specify which of the branch op's
    /// operands should be detensored.
    ///
    /// For the previous example, this method would compute: {bb2 -> {0}}.
    static DenseMap<Operation *, DenseSet<int>> computeBranchOpDetensoring(
        const DenseSet<BlockArgument> &blockArgsToDetensor) {
      DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;

      for (auto blockArgumentElem : blockArgsToDetensor) {
        Block *block = blockArgumentElem.getOwner();

        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (blockOperands.empty() ||
              blockOperands.isOperandProduced(blockArgumentElem.getArgNumber()))
            continue;

          detensorableBranchOps[terminator].insert(
              blockOperands.getOperandIndex(blockArgumentElem.getArgNumber()));
        }
      }

      return detensorableBranchOps;
    }
  };

  /// Detensorize linalg ops involved in control-flow within a function.
  ///
  /// This model starts from BranchOps and CondBranchOps within a function. For
  /// each such branch, the model then walks the use-def chain for the branch's
  /// condition backwards in order to understand where the condition's value
  /// comes from. If the condition value is (indirectly) computed by a linalg
  /// op that can be detensored, the model then continues walking the use-def
  /// chain in order to understand where the linalg op's operands come from.
  /// This leads to discovering a "detensoring component". A detensoring
  /// component is the set of operations + block arguments that are involved in
  /// control-flow AND can be detensored.
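  ///
  /// For instance (an illustrative sketch), in:
  ///   %cond_t = linalg.generic ... -> tensor<i1>
  ///   %cond = tensor.extract %cond_t[] : tensor<i1>
  ///   cf.cond_br %cond, ^bb1(%x : tensor<i32>), ^bb2
  /// the generic computing %cond_t, the extract feeding the branch, and the
  /// tensor<i32> block argument of ^bb1 would all belong to one detensoring
  /// component.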
  class ControlFlowDetectionModel : public CostModel {
  public:
    void compute(FunctionOpInterface func,
                 DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      SmallVector<Value> workList;

      func->walk([&](cf::CondBranchOp condBr) {
        llvm::append_range(workList, condBr.getOperands());
      });

      func->walk([&](cf::BranchOp br) {
        llvm::append_range(workList, br.getOperands());
      });

      DenseSet<Value> visitedValues;
      DenseSet<Operation *> visitedOps;

      // For a (to-be-detensored) value, check if it "escapes" the block by
      // being passed to a terminator. If it does, then workList is updated
      // with the corresponding argument of the successor block.
      auto updateWorkListWithSuccessorArguments =
          [&](Value value, BranchOpInterface terminator) {
            if (!terminator)
              return;

            for (auto operandIdx :
                 llvm::seq<unsigned>(0, terminator->getOperands().size())) {
              Value operand = terminator->getOperand(operandIdx);

              if (operand == value) {
                auto succBlockArg =
                    terminator.getSuccessorBlockArgument(operandIdx);

                if (succBlockArg && !blockArgsToDetensor.count(*succBlockArg))
                  workList.push_back(*succBlockArg);
              }
            }
          };

      while (!workList.empty()) {
        Value currentItem = workList.pop_back_val();

        if (!visitedValues.insert(currentItem).second)
          continue;

        // 1 - Look forward:
        // 1.1 - If currentItem escapes to one or more successors, add
        // the corresponding successor arguments to workList.
        updateWorkListWithSuccessorArguments(
            currentItem, dyn_cast<BranchOpInterface>(
                             currentItem.getParentBlock()->getTerminator()));

        // 1.2 - For each user of currentItem, add the defined values to
        // workList. This way, the user ops can be inspected later if they are
        // detensorable and if so, their operands will be added to workList to
        // potentially discover other parts of the detensorable component.
        for (auto *user : currentItem.getUsers())
          llvm::append_range(workList, user->getResults());

        // 2 - Look backward:
        // 2.1 - The current item is defined by a block argument. If the owner
        // block is a non-entry one, then:
        // * Add the argument to blockArgsToDetensor.
        // * Walk the use-def chain backwards to add each predecessor's
        // terminator-operands corresponding to currentItem to workList.
        if (auto currentItemBlockArgument =
                dyn_cast<BlockArgument>(currentItem)) {
          Block *ownerBlock = currentItemBlockArgument.getOwner();

          // Function arguments are not detensored/converted.
          if (&*ownerBlock->getParent()->begin() == ownerBlock)
            continue;

          // This inner-block argument is involved in control-flow; it should
          // be detensored.
          blockArgsToDetensor.insert(currentItemBlockArgument);

          for (PredecessorIterator pred = ownerBlock->pred_begin();
               pred != ownerBlock->pred_end(); ++pred) {
            BranchOpInterface predTerminator =
                dyn_cast<BranchOpInterface>((*pred)->getTerminator());

            // TODO: For now, we give up if any of the control-flow components
            // in a function is not detensorable. Fix that.
            if (!predTerminator) {
              opsToDetensor.clear();
              blockArgsToDetensor.clear();
              return;
            }

            auto ownerBlockOperands =
                predTerminator.getSuccessorOperands(pred.getSuccessorIndex());

            if (ownerBlockOperands.empty() ||
                ownerBlockOperands.isOperandProduced(
                    currentItemBlockArgument.getArgNumber()))
              continue;

            // For each predecessor, add the value it passes to that argument
            // to workList to find out how it's computed.
            workList.push_back(
                ownerBlockOperands[currentItemBlockArgument.getArgNumber()]);
          }

          continue;
        }

        Operation *currentItemDefiningOp = currentItem.getDefiningOp();

        if (!visitedOps.insert(currentItemDefiningOp).second)
          continue;

        // 2.2 - The current item is computed by a GenericOp. If the op should
        // be detensored, then:
        // * Add it to opsToDetensor.
        // * Add its operands to workList to discover other parts of the
        // potentially detensorable component.
        if (auto genericOp = dyn_cast<GenericOp>(currentItemDefiningOp)) {
          // The op was encountered already, no need to inspect it again.
          if (opsToDetensor.count(genericOp))
            continue;

          // The op should not be detensored; give up on it but continue with
          // discovering the rest of the control-flow component.
          if (!shouldBeDetensored(genericOp, typeConverter)) {
            continue;
          }

          opsToDetensor.insert(genericOp);
          llvm::append_range(workList, genericOp.getInputs());
          continue;
        }

        // 2.3 - The current item is the result of a FromElementsOp; it will be
        // trivially detensored later as part of canonicalization patterns
        // applied at the end of detensoring.
        //
        // Note: No need to check whether the result type of this op is
        // detensorable since if it wasn't we wouldn't reach this point in the
        // work list.
        if (isa<tensor::FromElementsOp>(currentItemDefiningOp))
          continue;

        // 2.4 - The current item is the result of a scalar op; add all its
        // operands to the work list.
        if (llvm::all_of(
                currentItemDefiningOp->getResultTypes(),
                [&](Type resultType) { return resultType.isIntOrFloat(); }))
          llvm::append_range(workList, currentItemDefiningOp->getOperands());
      }

      // Since the cost model gives up on some ops (see the details of step 2.2
      // above), block arguments that correspond to the values produced by
      // those ops should not be detensored either.

      DenseSet<BlockArgument> blockArgsToRemove;

      for (auto &blockArg : blockArgsToDetensor) {
        Block *block = blockArg.getParentBlock();

        // For the potentially detensorable block argument, find the
        // corresponding operands in predecessor blocks.
        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (blockOperands.empty() ||
              blockOperands.isOperandProduced(blockArg.getArgNumber()))
            continue;

          Operation *definingOp =
              blockOperands[blockArg.getArgNumber()].getDefiningOp();

          // If the operand is defined by a GenericOp that will not be
          // detensored, then do not detensor the corresponding block argument.
          if (isa_and_nonnull<GenericOp>(definingOp) &&
              opsToDetensor.count(definingOp) == 0) {
            blockArgsToRemove.insert(blockArg);
            break;
          }
        }
      }

      for (auto &blockArg : blockArgsToRemove) {
        blockArgsToDetensor.erase(blockArg);
      }
    }
  };

  /// Detensorize everything that can be detensored.
  class AggressiveDetensoringModel : public CostModel {
  public:
    void compute(FunctionOpInterface func,
                 DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      func->walk([&](GenericOp genericOp) {
        if (shouldBeDetensored(genericOp, typeConverter))
          opsToDetensor.insert(genericOp);
      });

      for (Block &block : llvm::drop_begin(func.getFunctionBody(), 1))
        blockArgsToDetensor.insert_range(block.getArguments());
    }
  };

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    DetensorizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);
    DenseSet<Operation *> opsToDetensor;
    DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
    DenseSet<BlockArgument> blockArgsToDetensor;
    FunctionOpInterface funcOp = getOperation();

    if (funcOp.getFunctionBody().empty())
      return;

    // Make sure the entry block of the function doesn't contain any Linalg
    // ops. Otherwise, it may lead to the signature of the block being changed
    // by the dialect conversion below, which would make the function op
    // invalid because its type shouldn't change.
    IRRewriter rewriter(funcOp->getContext());
    Block *entryBlock = &funcOp.getFunctionBody().front();
    Block *postEntryBlock =
        rewriter.splitBlock(entryBlock, entryBlock->begin());
    rewriter.setInsertionPointToStart(entryBlock);
    auto branch =
        rewriter.create<cf::BranchOp>(rewriter.getUnknownLoc(), postEntryBlock);
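
    // After this split (an illustrative sketch of the resulting structure),
    // the entry block only forwards to the original body, so the function
    // signature is never touched by the conversion below:
    //   ^entry(%args...):   // original signature, left unconverted
    //     cf.br ^post_entry
    //   ^post_entry:        // the original body continues here
    //     ...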

    if (aggressiveMode.getValue()) {
      AggressiveDetensoringModel costModel;
      costModel.compute(funcOp, typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    } else {
      ControlFlowDetectionModel costModel;
      costModel.compute(funcOp, typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    }

    detensorableBranchOps =
        CostModel::computeBranchOpDetensoring(blockArgsToDetensor);

    target.addDynamicallyLegalOp<GenericOp>(
        [&](GenericOp op) { return !opsToDetensor.count(op); });

    target.markUnknownOpDynamicallyLegal([&](Operation *op) {
      // A function is legal if all of its non-entry blocks are legal. We
      // don't legalize the entry block (i.e. the function's signature)
      // since detensoring can't happen along external calling convention
      // boundaries, which we conservatively approximate as all function
      // signatures.
      if (auto funcOp = dyn_cast<FunctionOpInterface>(op)) {
        Region &body = funcOp.getFunctionBody();
        return llvm::all_of(llvm::drop_begin(body, 1), [&](Block &block) {
          return !llvm::any_of(
              blockArgsToDetensor, [&](BlockArgument blockArgument) {
                return blockArgument.getOwner() == &block &&
                       !typeConverter.isLegal(blockArgument.getType());
              });
        });
      }

      if (isNotBranchOpInterfaceOrReturnLikeOp(op) ||
          isLegalForReturnOpTypeConversionPattern(op, typeConverter,
                                                  /*returnOpAlwaysLegal*/ true))
        return true;

      if (auto branchOp = dyn_cast<BranchOpInterface>(op)) {
        if (!detensorableBranchOps.count(branchOp))
          return true;

        for (auto operandIdx : detensorableBranchOps[branchOp])
          if (!typeConverter.isLegal(
                  branchOp->getOperand(operandIdx).getType()))
            return false;

        return true;
      }

      return false;
    });

    patterns.add<DetensorizeGenericOp>(typeConverter, context);
    patterns.add<FunctionNonEntryBlockConversion>(context, typeConverter,
                                                  blockArgsToDetensor);
    // Since non-entry block arguments get detensored, we also need to
    // update the control flow inside the function to reflect the correct
    // types.
    auto shouldConvertBranchOperand = [&](BranchOpInterface branchOp,
                                          int operandIdx) -> bool {
      return detensorableBranchOps.count(branchOp) &&
             detensorableBranchOps[branchOp].count(operandIdx);
    };

    populateBranchOpInterfaceTypeConversionPattern(patterns, typeConverter,
                                                   shouldConvertBranchOperand);

    if (failed(
            applyFullConversion(getOperation(), target, std::move(patterns))))
      signalPassFailure();

    RewritePatternSet canonPatterns(context);
    tensor::FromElementsOp::getCanonicalizationPatterns(canonPatterns, context);
    if (failed(applyPatternsGreedily(getOperation(), std::move(canonPatterns))))
      signalPassFailure();

    // Get rid of the dummy entry block we created in the beginning to work
    // around dialect conversion signature rewriting.
    rewriter.eraseOp(branch);
    rewriter.mergeBlocks(postEntryBlock, entryBlock);
  }
};
} // namespace