MLIR  20.0.0git
Detensorize.cpp
Go to the documentation of this file.
1 //===- Detensorize.cpp - Linalg transformations as patterns ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "mlir/Dialect/Linalg/Passes.h"

#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include <iterator>
#include <memory>
#include <utility>
22 
23 namespace mlir {
24 #define GEN_PASS_DEF_LINALGDETENSORIZEPASS
25 #include "mlir/Dialect/Linalg/Passes.h.inc"
26 } // namespace mlir
27 
28 using namespace mlir;
29 using namespace mlir::linalg;
30 
32  ValueRange inputs, Location loc) {
33  assert(inputs.size() == 1);
34  auto inputType = inputs[0].getType();
35  if (isa<TensorType>(inputType))
36  return nullptr;
37 
38  // A detensored value is converted back by creating a new tensor from its
39  // element(s).
40  return builder.create<tensor::FromElementsOp>(
41  loc, RankedTensorType::get({}, inputType), inputs[0]);
42 }
43 
44 namespace {
45 /// Defines the criteria a TensorType must follow in order to be considered
46 /// "detensorable".
47 ///
48 /// NOTE: For now, only 0-D tensors are supported.
49 ///
50 /// Returns true if tensorType can be detensored.
51 bool canBeDetensored(TensorType tensorType) {
52  return tensorType.hasRank() && tensorType.getRank() == 0;
53 }
54 
55 bool shouldBeDetensored(Operation *op, TypeConverter typeConverter) {
56  GenericOp genericOp = dyn_cast_or_null<GenericOp>(op);
57  return genericOp &&
58  llvm::all_of(genericOp->getOpOperands(), [&](OpOperand &opOperand) {
59  return !typeConverter.isLegal(opOperand.get().getType());
60  });
61 }
62 
63 /// A conversion pattern for detensoring `linalg.generic` ops.
64 class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
65 public:
67  LogicalResult
68  matchAndRewrite(GenericOp op, OpAdaptor adaptor,
69  ConversionPatternRewriter &rewriter) const override {
70  Block *originalBlock = op->getBlock();
71 
72  // Gather some information about the op before inlining its region.
73  Block *opEntryBlock = &*op.getRegion().begin();
74  YieldOp yieldOp = dyn_cast<YieldOp>(op.getRegion().back().getTerminator());
75 
76  // Split the op's region before the op. This way, we have a clear insertion
77  // point in which the op can be inlined.
78  Block *newBlock = rewriter.splitBlock(originalBlock, Block::iterator(op));
79  rewriter.inlineRegionBefore(op.getRegion(), newBlock);
80  // Now that op's region is inlined, the operands of its YieldOp are mapped
81  // to the materialized target values. Therefore, we can replace the op's
82  // uses with those of its YielOp's operands.
83  rewriter.replaceOp(op, yieldOp->getOperands());
84 
85  // No need for these intermediate blocks, merge them into 1.
86  rewriter.mergeBlocks(opEntryBlock, originalBlock, adaptor.getOperands());
87  rewriter.mergeBlocks(newBlock, originalBlock, {});
88 
89  rewriter.eraseOp(&*Block::iterator(yieldOp));
90 
91  return success();
92  }
93 };
94 
95 /// A conversion pattern for detensoring internal (non-entry) blocks within a
96 /// function.
97 struct FunctionNonEntryBlockConversion
98  : public OpInterfaceConversionPattern<FunctionOpInterface> {
99  FunctionNonEntryBlockConversion(MLIRContext *ctx, TypeConverter &converter,
100  DenseSet<BlockArgument> blockArgsToDetensor)
101  : OpInterfaceConversionPattern(converter, ctx),
102  blockArgsToDetensor(std::move(blockArgsToDetensor)) {}
103 
104  LogicalResult
105  matchAndRewrite(FunctionOpInterface op, ArrayRef<Value> operands,
106  ConversionPatternRewriter &rewriter) const override {
107  rewriter.startOpModification(op);
108  Region &region = op.getFunctionBody();
109 
110  for (Block &block :
111  llvm::make_early_inc_range(llvm::drop_begin(region, 1))) {
113  /*numOrigInputs=*/block.getNumArguments());
114 
115  for (BlockArgument blockArgument : block.getArguments()) {
116  int idx = blockArgument.getArgNumber();
117 
118  if (blockArgsToDetensor.count(blockArgument))
119  conversion.addInputs(idx, {getTypeConverter()->convertType(
120  block.getArgumentTypes()[idx])});
121  else
122  conversion.addInputs(idx, {block.getArgumentTypes()[idx]});
123  }
124 
125  rewriter.applySignatureConversion(&block, conversion, getTypeConverter());
126  }
127 
128  rewriter.finalizeOpModification(op);
129  return success();
130  }
131 
132 private:
133  const DenseSet<BlockArgument> blockArgsToDetensor;
134 };
135 
/// Type converter driving detensoring: detensorable (rank-0) tensor types are
/// converted to their element type, every other type is left unchanged.
class DetensorizeTypeConverter : public TypeConverter {
public:
  DetensorizeTypeConverter() {
    // Fallback conversion that keeps any type as-is. NOTE(review): in MLIR,
    // conversions added later are tried first, so the TensorType conversion
    // below takes precedence over this catch-all for tensor types.
    addConversion([](Type type) { return type; });

    // A TensorType that can be detensored, is converted to the underlying
    // element type.
    addConversion([](TensorType tensorType) -> Type {
      if (canBeDetensored(tensorType))
        return tensorType.getElementType();

      return tensorType;
    });

    // A tensor value is detensored by extracting its element(s) with a 0-D
    // `tensor.extract` (no indices needed for a rank-0 tensor).
    addTargetMaterialization([](OpBuilder &builder, Type type,
                                ValueRange inputs, Location loc) -> Value {
      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
    });

    // Going back to tensor form (for users that still expect tensors) is
    // handled by wrapping the scalar in a `tensor.from_elements` op.
    addSourceMaterialization(sourceMaterializationCallback);
    addArgumentMaterialization(sourceMaterializationCallback);
  }
};
160 
161 /// @see LinalgDetensorize in Linalg/Passes.td for more details.
162 struct LinalgDetensorize
163  : public impl::LinalgDetensorizePassBase<LinalgDetensorize> {
164  using impl::LinalgDetensorizePassBase<
165  LinalgDetensorize>::LinalgDetensorizePassBase;
166  LinalgDetensorize() = default;
167 
168  class CostModel {
169  public:
170  virtual ~CostModel() = default;
171 
172  /// A cost model algorithm computes the following outputs:
173  ///
174  /// - opsToDetensor: the list of linalg ops that should be
175  /// detensored.
176  ///
177  /// - blockArgsToDetensor: since the operands and results of detensored
178  /// linalg ops can cross the BB boundary (e.g. a linalg op's input can come
179  /// from a BB argument and a linalg op's output can be passed to successor
180  /// BBs), we need to maintain the sub-set of arguments that should be
181  /// detensored (i.e. converted by typeConverter) for each affected BB.
182  ///
183  /// Example:
184  ///
185  /// For the following snippet:
186  /// ...
187  /// ^bb1(%6: tensor<i32>, %9: tensor<i32>):
188  /// %7 = tensor.empty() : tensor<i32>
189  /// %8 = linalg.generic #attrs
190  /// ins(%6, %6 : tensor<i32>, tensor<i32>)
191  /// outs(%7 : tensor<i32>) {
192  /// ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
193  /// %9 = arith.addi %arg0, %arg1 : i32
194  /// linalg.yield %9 : i32
195  /// } -> tensor<i32>
196  /// %10 = "some.op"(%9)
197  /// br ^bb2(%8 : tensor<i32>)
198  /// ...
199  ///
200  /// if the cost model decides that the linalg.generic op should be
201  /// detensored, then:
202  /// - opsToDetensor should be = {linalg.generic{add}}.
203  /// - blockArgsToDetensor should be = {bb1 -> {0}, bb2 -> {0}}.
204  virtual void compute(FunctionOpInterface func,
205  DetensorizeTypeConverter typeConverter,
206  DenseSet<Operation *> &opsToDetensor,
207  DenseSet<BlockArgument> &blockArgsToDetensor) = 0;
208 
209  /// From the blockArgsToDetensor set computed by a CostModel
210  /// implementation, this method computes the corresponding branch op
211  /// detensoring. The result is a map from a branch op to a subset of indices
212  /// of its operands. The indices specify which of the branch op's operands
213  /// should be detensored.
214  ///
215  /// For the previous example, this method would compute: {bb2 -> {0}}.
216  static DenseMap<Operation *, DenseSet<int>> computeBranchOpDetensoring(
217  const DenseSet<BlockArgument> &blockArgsToDetensor) {
218  DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
219 
220  for (auto blockArgumentElem : blockArgsToDetensor) {
221  Block *block = blockArgumentElem.getOwner();
222 
223  for (PredecessorIterator pred = block->pred_begin();
224  pred != block->pred_end(); ++pred) {
225  BranchOpInterface terminator =
226  dyn_cast<BranchOpInterface>((*pred)->getTerminator());
227  auto blockOperands =
228  terminator.getSuccessorOperands(pred.getSuccessorIndex());
229 
230  if (blockOperands.empty() ||
231  blockOperands.isOperandProduced(blockArgumentElem.getArgNumber()))
232  continue;
233 
234  detensorableBranchOps[terminator].insert(
235  blockOperands.getOperandIndex(blockArgumentElem.getArgNumber()));
236  }
237  }
238 
239  return detensorableBranchOps;
240  }
241  };
242 
243  /// Detensorize linalg ops involved in control-flow within a function.
244  ///
245  /// This model starts from BranchOps and CondBranchOps within a function. For
246  /// each such branch, the model then walks the use-def chain for the branch's
247  /// condition backwards in order to understand where the condition's value
248  /// comes from. If the condition value is (indirectly) computed by a linalg op
249  /// that can be detensored, the model then continues walking the use-def chain
250  /// in order to understand where the linalg op's operands come from. This
251  /// leads to discovering a "detensoring component". A detensoring component is
252  /// the set of operations + block arguments that are involved in control-flow
253  /// AND can be detensored.
254  class ControlFlowDetectionModel : public CostModel {
255  public:
256  void compute(FunctionOpInterface func,
257  DetensorizeTypeConverter typeConverter,
258  DenseSet<Operation *> &opsToDetensor,
259  DenseSet<BlockArgument> &blockArgsToDetensor) override {
260  SmallVector<Value> workList;
261 
262  func->walk([&](cf::CondBranchOp condBr) {
263  llvm::append_range(workList, condBr.getOperands());
264  });
265 
266  func->walk([&](cf::BranchOp br) {
267  llvm::append_range(workList, br.getOperands());
268  });
269 
270  DenseSet<Value> visitedValues;
271  DenseSet<Operation *> visitedOps;
272 
273  // For a (to-be-detesored) value, check if it "escapes" the block by being
274  // passed to terminator. If it does, then workList is updated with the
275  // corresponding argument to the successor block.
276  auto updateWorkListWithSuccessorArguments =
277  [&](Value value, BranchOpInterface terminator) {
278  if (!terminator)
279  return;
280 
281  for (auto operandIdx :
282  llvm::seq<unsigned>(0, terminator->getOperands().size())) {
283  Value operand = terminator->getOperand(operandIdx);
284 
285  if (operand == value) {
286  auto succBlockArg =
287  terminator.getSuccessorBlockArgument(operandIdx);
288 
289  if (succBlockArg && !blockArgsToDetensor.count(*succBlockArg))
290  workList.push_back(*succBlockArg);
291  }
292  }
293  };
294 
295  while (!workList.empty()) {
296  Value currentItem = workList.pop_back_val();
297 
298  if (!visitedValues.insert(currentItem).second)
299  continue;
300 
301  // 1 - Look forward:
302  // 1.1 - If currentItem escapes to one or more successors, add
303  // the corresponding successor arguments to workList.
304  updateWorkListWithSuccessorArguments(
305  currentItem, dyn_cast<BranchOpInterface>(
306  currentItem.getParentBlock()->getTerminator()));
307 
308  // 1.2 - For each user of currentItem, add the defined values to
309  // workList. This way, the user ops can be inspected later if they are
310  // detensorable and if so, their operands will be added to workList to
311  // potentially discover other parts of the detensorable component.
312  for (auto *user : currentItem.getUsers())
313  llvm::append_range(workList, user->getResults());
314 
315  // 2 - Look backward:
316  // 2.1 - The current item is defined by a block argument. If the owner
317  // block is a non-entry one, then:
318  // * Add the argument to blockArgsToDetensor.
319  // * Walk the use-def chain backwards to add each predecessor's
320  // terminator-operands corresponding to currentItem to workList.
321  if (dyn_cast<BlockArgument>(currentItem)) {
322  BlockArgument currentItemBlockArgument =
323  cast<BlockArgument>(currentItem);
324  Block *ownerBlock = currentItemBlockArgument.getOwner();
325 
326  // Function arguments are not detensored/converted.
327  if (&*ownerBlock->getParent()->begin() == ownerBlock)
328  continue;
329 
330  // This inner-block argument is involved in control-flow, it should be
331  // detensored.
332  blockArgsToDetensor.insert(currentItemBlockArgument);
333 
334  for (PredecessorIterator pred = ownerBlock->pred_begin();
335  pred != ownerBlock->pred_end(); ++pred) {
336  BranchOpInterface predTerminator =
337  dyn_cast<BranchOpInterface>((*pred)->getTerminator());
338 
339  // TODO: For now, we give up if any of the control-flow components
340  // in a function is not detensorable. Fix that.
341  if (!predTerminator) {
342  opsToDetensor.clear();
343  blockArgsToDetensor.clear();
344  return;
345  }
346 
347  auto ownerBlockOperands =
348  predTerminator.getSuccessorOperands(pred.getSuccessorIndex());
349 
350  if (ownerBlockOperands.empty() ||
351  ownerBlockOperands.isOperandProduced(
352  currentItemBlockArgument.getArgNumber()))
353  continue;
354 
355  // For each predecessor, add the value it passes to that argument to
356  // workList to find out how it's computed.
357  workList.push_back(
358  ownerBlockOperands[currentItemBlockArgument.getArgNumber()]);
359  }
360 
361  continue;
362  }
363 
364  Operation *currentItemDefiningOp = currentItem.getDefiningOp();
365 
366  if (!visitedOps.insert(currentItemDefiningOp).second)
367  continue;
368 
369  // 2.2 - The current item is computed by a GenericOp. If the op should
370  // be detensored, then:
371  // * Add it to opsToDetensor.
372  // * Add its operands to workList to discover other parts of the
373  // potentially detensorable component.
374  if (auto genericOp = dyn_cast<GenericOp>(currentItemDefiningOp)) {
375  // The op was encountered already, no need to inspect it again.
376  if (opsToDetensor.count(genericOp))
377  continue;
378 
379  // The op should not be detensored, give up on it but continue with
380  // discovering the rest of the control-flow component.
381  if (!shouldBeDetensored(genericOp, typeConverter)) {
382  continue;
383  }
384 
385  opsToDetensor.insert(genericOp);
386  llvm::append_range(workList, genericOp.getInputs());
387  continue;
388  }
389 
390  // 2.3 - The current item is the result of a FromElementsOp, it will be
391  // trivially detensored later as part of canonicalization patterns
392  // applied at the end of detensoring.
393  //
394  // Note: No need to check whether the result type of this op is
395  // detensorable since if it wasn't we wouldn't reach that point in the
396  // work list.
397  if (isa<tensor::FromElementsOp>(currentItemDefiningOp))
398  continue;
399 
400  // 2.4 - The current item is the result of a scalar op, add all its
401  // operands to the work list.
402  if (llvm::all_of(
403  currentItemDefiningOp->getResultTypes(),
404  [&](Type resultType) { return resultType.isIntOrFloat(); }))
405  llvm::append_range(workList, currentItemDefiningOp->getOperands());
406  }
407 
408  // Since the cost model gives up on some ops (see the details of step 2.2
409  // above), block arguments that correspond to the values produced by those
410  // ops should not be detensored as well.
411 
412  DenseSet<BlockArgument> blockArgsToRemove;
413 
414  for (auto &blockArg : blockArgsToDetensor) {
415  Block *block = blockArg.getParentBlock();
416 
417  // For the potentially detensorable block argument, find the
418  // correpsonding operands in predecessor blocks.
419  for (PredecessorIterator pred = block->pred_begin();
420  pred != block->pred_end(); ++pred) {
421  BranchOpInterface terminator =
422  dyn_cast<BranchOpInterface>((*pred)->getTerminator());
423  auto blockOperands =
424  terminator.getSuccessorOperands(pred.getSuccessorIndex());
425 
426  if (blockOperands.empty() ||
427  blockOperands.isOperandProduced(blockArg.getArgNumber()))
428  continue;
429 
430  Operation *definingOp =
431  blockOperands[blockArg.getArgNumber()].getDefiningOp();
432 
433  // If the operand is defined by a GenericOp that will not be
434  // detensored, then do not detensor the corresponding block argument.
435  if (isa_and_nonnull<GenericOp>(definingOp) &&
436  opsToDetensor.count(definingOp) == 0) {
437  blockArgsToRemove.insert(blockArg);
438  break;
439  }
440  }
441  }
442 
443  for (auto &blockArg : blockArgsToRemove) {
444  blockArgsToDetensor.erase(blockArg);
445  }
446  }
447  };
448 
449  /// Detensorize everything that can detensored.
450  class AggressiveDetensoringModel : public CostModel {
451  public:
452  void compute(FunctionOpInterface func,
453  DetensorizeTypeConverter typeConverter,
454  DenseSet<Operation *> &opsToDetensor,
455  DenseSet<BlockArgument> &blockArgsToDetensor) override {
456  func->walk([&](GenericOp genericOp) {
457  if (shouldBeDetensored(genericOp, typeConverter))
458  opsToDetensor.insert(genericOp);
459  });
460 
461  for (Block &block : llvm::drop_begin(func.getFunctionBody(), 1))
462  for (BlockArgument blockArgument : block.getArguments())
463  blockArgsToDetensor.insert(blockArgument);
464  }
465  };
466 
467  void runOnOperation() override {
468  MLIRContext *context = &getContext();
469  DetensorizeTypeConverter typeConverter;
470  RewritePatternSet patterns(context);
471  ConversionTarget target(*context);
472  DenseSet<Operation *> opsToDetensor;
473  DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
474  DenseSet<BlockArgument> blockArgsToDetensor;
475  FunctionOpInterface funcOp = getOperation();
476 
477  if (funcOp.getFunctionBody().empty())
478  return;
479 
480  // Make sure the entry block of the function doesn't contain any Linalg ops.
481  // Otherwise, it may lead to the signature of the block being changed by the
482  // dialect conversion below, which would make the function op invalid
483  // because its type shouldn't change.
484  IRRewriter rewriter(funcOp->getContext());
485  Block *entryBlock = &funcOp.getFunctionBody().front();
486  Block *postEntryBlock =
487  rewriter.splitBlock(entryBlock, entryBlock->begin());
488  rewriter.setInsertionPointToStart(entryBlock);
489  auto branch =
490  rewriter.create<cf::BranchOp>(rewriter.getUnknownLoc(), postEntryBlock);
491 
492  if (aggressiveMode.getValue()) {
493  AggressiveDetensoringModel costModel;
494  costModel.compute(funcOp, typeConverter, opsToDetensor,
495  blockArgsToDetensor);
496  } else {
497  ControlFlowDetectionModel costModel;
498  costModel.compute(funcOp, typeConverter, opsToDetensor,
499  blockArgsToDetensor);
500  }
501 
502  detensorableBranchOps =
503  CostModel::computeBranchOpDetensoring(blockArgsToDetensor);
504 
505  target.addDynamicallyLegalOp<GenericOp>(
506  [&](GenericOp op) { return !opsToDetensor.count(op); });
507 
508  target.markUnknownOpDynamicallyLegal([&](Operation *op) {
509  // A function is legal if all of its non-entry blocks are legal. We
510  // don't legalize the entry block (i.e. the function's signature)
511  // since detensoring can't happen along external calling convention
512  // boundaries, which we conservatively approximate as all function
513  // signatures.
514  if (auto funcOp = dyn_cast<FunctionOpInterface>(op)) {
515  Region &body = funcOp.getFunctionBody();
516  return llvm::all_of(llvm::drop_begin(body, 1), [&](Block &block) {
517  return !llvm::any_of(
518  blockArgsToDetensor, [&](BlockArgument blockArgument) {
519  return blockArgument.getOwner() == &block &&
520  !typeConverter.isLegal(blockArgument.getType());
521  });
522  });
523  }
524 
526  isLegalForReturnOpTypeConversionPattern(op, typeConverter,
527  /*returnOpAlwaysLegal*/ true))
528  return true;
529 
530  if (auto branchOp = dyn_cast<BranchOpInterface>(op)) {
531  if (!detensorableBranchOps.count(branchOp))
532  return true;
533 
534  for (auto operandIdx : detensorableBranchOps[branchOp])
535  if (!typeConverter.isLegal(
536  branchOp->getOperand(operandIdx).getType()))
537  return false;
538 
539  return true;
540  }
541 
542  return false;
543  });
544 
545  patterns.add<DetensorizeGenericOp>(typeConverter, context);
546  patterns.add<FunctionNonEntryBlockConversion>(context, typeConverter,
547  blockArgsToDetensor);
548  // Since non-entry block arguments get detensorized, we also need to
549  // update the control flow inside the function to reflect the correct
550  // types.
551  auto shouldConvertBranchOperand = [&](BranchOpInterface branchOp,
552  int operandIdx) -> bool {
553  return detensorableBranchOps.count(branchOp) &&
554  detensorableBranchOps[branchOp].count(operandIdx);
555  };
556 
557  populateBranchOpInterfaceTypeConversionPattern(patterns, typeConverter,
558  shouldConvertBranchOperand);
559 
560  if (failed(
561  applyFullConversion(getOperation(), target, std::move(patterns))))
562  signalPassFailure();
563 
564  RewritePatternSet canonPatterns(context);
565  tensor::FromElementsOp::getCanonicalizationPatterns(canonPatterns, context);
566  if (failed(applyPatternsAndFoldGreedily(getOperation(),
567  std::move(canonPatterns))))
568  signalPassFailure();
569 
570  // Get rid of the dummy entry block we created in the beginning to work
571  // around dialect conversion signature rewriting.
572  rewriter.eraseOp(branch);
573  rewriter.mergeBlocks(postEntryBlock, entryBlock);
574  }
575 };
576 } // namespace
static Value sourceMaterializationCallback(OpBuilder &builder, Type type, ValueRange inputs, Location loc)
Definition: Detensorize.cpp:31
static MLIRContext * getContext(OpFoldResult val)
This class represents an argument of a Block.
Definition: Value.h:319
Block * getOwner() const
Returns the block that owns this argument.
Definition: Value.h:328
unsigned getArgNumber() const
Returns the number of this argument.
Definition: Value.h:331
Block represents an ordered list of Operations.
Definition: Block.h:33
OpListType::iterator iterator
Definition: Block.h:140
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
Definition: Block.cpp:29
pred_iterator pred_begin()
Definition: Block.h:233
Operation * getTerminator()
Get the terminator operation of this block.
Definition: Block.cpp:246
BlockArgListType getArguments()
Definition: Block.h:87
Operation & front()
Definition: Block.h:153
iterator begin()
Definition: Block.h:143
pred_iterator pred_end()
Definition: Block.h:236
Location getUnknownLoc()
Definition: Builders.cpp:27
This class implements a pattern rewriter for use with ConversionPatterns.
void replaceOp(Operation *op, ValueRange newValues) override
Replace the given operation with the new values.
Block * applySignatureConversion(Block *block, TypeConverter::SignatureConversion &conversion, const TypeConverter *converter=nullptr)
Apply a signature conversion to given block.
void startOpModification(Operation *op) override
PatternRewriter hook for updating the given operation in-place.
void eraseOp(Operation *op) override
PatternRewriter hook for erasing a dead operation.
void finalizeOpModification(Operation *op) override
PatternRewriter hook for updating the given operation in-place.
This class describes a specific conversion target.
This class coordinates rewriting a piece of IR outside of a pattern rewrite, providing a way to keep ...
Definition: PatternMatch.h:772
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:66
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:60
This class helps build Operations.
Definition: Builders.h:215
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:439
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:497
OpConversionPattern is a wrapper around ConversionPattern that allows for matching and rewriting agai...
OpConversionPattern(MLIRContext *context, PatternBenefit benefit=1)
OpInterfaceConversionPattern is a wrapper around ConversionPattern that allows for matching and rewri...
This class represents an operand of an operation.
Definition: Value.h:267
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
result_type_range getResultTypes()
Definition: Operation.h:423
operand_range getOperands()
Returns an iterator on the underlying Value's.
Definition: Operation.h:373
Implement a predecessor iterator for blocks.
Definition: BlockSupport.h:51
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition: Region.h:26
iterator begin()
Definition: Region.h:55
Block * splitBlock(Block *block, Block::iterator before)
Split the operations starting at "before" (inclusive) out of the given block into a new block,...
void mergeBlocks(Block *source, Block *dest, ValueRange argValues=std::nullopt)
Inline the operations of block 'source' into the end of block 'dest'.
void inlineRegionBefore(Region &region, Region &parent, Region::iterator before)
Move the blocks that belong to "region" before the given position in another region "parent".
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
Definition: BuiltinTypes.h:102
bool hasRank() const
Returns if this type is ranked, i.e. it has a known number of dimensions.
Type getElementType() const
Returns the element type of this tensor type.
This class provides all of the information necessary to convert a type signature.
Type conversion class.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:381
type_range getType() const
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:129
Block * getParentBlock()
Return the Block in which this Value is defined.
Definition: Value.cpp:48
user_range getUsers() const
Definition: Value.h:228
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:20
Include the generated interface declarations.
bool isLegalForReturnOpTypeConversionPattern(Operation *op, const TypeConverter &converter, bool returnOpAlwaysLegal=false)
For ReturnLike ops (except return), return True.
LogicalResult applyFullConversion(ArrayRef< Operation * > ops, const ConversionTarget &target, const FrozenRewritePatternSet &patterns, ConversionConfig config=ConversionConfig())
Apply a complete conversion on the given operations, and all nested operations.
bool isNotBranchOpInterfaceOrReturnLikeOp(Operation *op)
Return true if op is neither BranchOpInterface nor ReturnLike.
LogicalResult applyPatternsAndFoldGreedily(Region &region, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig(), bool *changed=nullptr)
Rewrite ops in the given region, which must be isolated from above, by repeatedly applying the highes...
void populateBranchOpInterfaceTypeConversionPattern(RewritePatternSet &patterns, const TypeConverter &converter, function_ref< bool(BranchOpInterface branchOp, int idx)> shouldConvertBranchOperand=nullptr)
Add a pattern to the given pattern list to rewrite branch operations to use operands that have been l...
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...