//===- AsyncParallelFor.cpp - Implementation of Async Parallel For --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the scf.parallel to scf.for + async.execute conversion
// pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Async/Passes.h"

#include "PassDetail.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/Async/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/RegionUtils.h"
#include <utility>

namespace mlir {
#define GEN_PASS_DEF_ASYNCPARALLELFOR
#include "mlir/Dialect/Async/Passes.h.inc"
} // namespace mlir

using namespace mlir;
using namespace mlir::async;

#define DEBUG_TYPE "async-parallel-for"

namespace {

// Rewrites an scf.parallel operation into multiple concurrent async.execute
// operations over non-overlapping subranges of the original loop.
//
// Example:
//
//   scf.parallel (%i, %j) = (%lbi, %lbj) to (%ubi, %ubj) step (%si, %sj) {
//     "do_some_compute"(%i, %j): () -> ()
//   }
//
// Converted to:
//
//   // Parallel compute function that executes the parallel body region for
//   // a subset of the parallel iteration space defined by the one-dimensional
//   // compute block index.
//   func parallel_compute_function(%block_index : index, %block_size : index,
//                                  <parallel operation properties>, ...) {
//     // Compute multi-dimensional loop bounds for %block_index.
//     %block_lbi, %block_lbj = ...
//     %block_ubi, %block_ubj = ...
//
//     // Clone parallel operation body into the scf.for loop nest.
//     scf.for %i = %block_lbi to %block_ubi {
//       scf.for %j = %block_lbj to %block_ubj {
//         "do_some_compute"(%i, %j): () -> ()
//       }
//     }
//   }
//
// And a dispatch function depending on the `asyncDispatch` option.
//
// When async dispatch is on (pseudocode):
//
//   %block_size = ... compute parallel compute block size
//   %block_count = ... compute the number of compute blocks
//
//   func @async_dispatch(%block_start : index, %block_end : index, ...) {
//     // Keep splitting the block range until we reach a range of size 1.
//     while (%block_end - %block_start > 1) {
//       %mid_index = %block_start + (%block_end - %block_start) / 2;
//       async.execute { call @async_dispatch(%mid_index, %block_end); }
//       %block_end = %mid_index
//     }
//
//     // Call the parallel compute function for a single block.
//     call @parallel_compute_fn(%block_start, %block_size, ...);
//   }
//
//   // Launch async dispatch for the [0, %block_count) range.
//   call @async_dispatch(%c0, %block_count);
//
// When async dispatch is off:
//
//   %block_size = ... compute parallel compute block size
//   %block_count = ... compute the number of compute blocks
//
//   scf.for %block_index = %c0 to %block_count {
//     call @parallel_compute_fn(%block_index, %block_size, ...)
//   }
//
struct AsyncParallelForPass
    : public impl::AsyncParallelForBase<AsyncParallelForPass> {
  AsyncParallelForPass() = default;

  AsyncParallelForPass(bool asyncDispatch, int32_t numWorkerThreads,
                       int32_t minTaskSize) {
    this->asyncDispatch = asyncDispatch;
    this->numWorkerThreads = numWorkerThreads;
    this->minTaskSize = minTaskSize;
  }

  void runOnOperation() override;
};

struct AsyncParallelForRewrite : public OpRewritePattern<scf::ParallelOp> {
public:
  AsyncParallelForRewrite(
      MLIRContext *ctx, bool asyncDispatch, int32_t numWorkerThreads,
      AsyncMinTaskSizeComputationFunction computeMinTaskSize)
      : OpRewritePattern(ctx), asyncDispatch(asyncDispatch),
        numWorkerThreads(numWorkerThreads),
        computeMinTaskSize(std::move(computeMinTaskSize)) {}

  LogicalResult matchAndRewrite(scf::ParallelOp op,
                                PatternRewriter &rewriter) const override;

private:
  bool asyncDispatch;
  int32_t numWorkerThreads;
  AsyncMinTaskSizeComputationFunction computeMinTaskSize;
};

struct ParallelComputeFunctionType {
  FunctionType type;
  SmallVector<Value> captures;
};

// Helper struct to parse the parallel compute function argument list.
struct ParallelComputeFunctionArgs {
  BlockArgument blockIndex();
  BlockArgument blockSize();
  ArrayRef<BlockArgument> tripCounts();
  ArrayRef<BlockArgument> lowerBounds();
  ArrayRef<BlockArgument> upperBounds();
  ArrayRef<BlockArgument> steps();
  ArrayRef<BlockArgument> captures();

  unsigned numLoops;
  ArrayRef<BlockArgument> args;
};

struct ParallelComputeFunctionBounds {
  SmallVector<IntegerAttr> tripCounts;
  SmallVector<IntegerAttr> lowerBounds;
  SmallVector<IntegerAttr> upperBounds;
  SmallVector<IntegerAttr> steps;
};

struct ParallelComputeFunction {
  unsigned numLoops;
  func::FuncOp func;
  llvm::SmallVector<Value> captures;
};

} // namespace

BlockArgument ParallelComputeFunctionArgs::blockIndex() { return args[0]; }
BlockArgument ParallelComputeFunctionArgs::blockSize() { return args[1]; }

ArrayRef<BlockArgument> ParallelComputeFunctionArgs::tripCounts() {
  return args.drop_front(2).take_front(numLoops);
}

ArrayRef<BlockArgument> ParallelComputeFunctionArgs::lowerBounds() {
  return args.drop_front(2 + 1 * numLoops).take_front(numLoops);
}

ArrayRef<BlockArgument> ParallelComputeFunctionArgs::upperBounds() {
  return args.drop_front(2 + 2 * numLoops).take_front(numLoops);
}

ArrayRef<BlockArgument> ParallelComputeFunctionArgs::steps() {
  return args.drop_front(2 + 3 * numLoops).take_front(numLoops);
}

ArrayRef<BlockArgument> ParallelComputeFunctionArgs::captures() {
  return args.drop_front(2 + 4 * numLoops);
}
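
// Illustrative layout (assumed example, not generated code): for numLoops = 2
// with one implicit capture, the flat argument list decodes as
//   args = [blockIndex, blockSize, // one-dimensional block position
//           tc0, tc1,              // loop trip counts
//           lb0, lb1,              // lower bounds
//           ub0, ub1,              // upper bounds
//           s0, s1,                // steps
//           capture0]              // implicit captures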

template <typename ValueRange>
static SmallVector<IntegerAttr> integerConstants(ValueRange values) {
  SmallVector<IntegerAttr> attrs(values.size());
  for (unsigned i = 0; i < values.size(); ++i)
    matchPattern(values[i], m_Constant(&attrs[i]));
  return attrs;
}

// Converts a one-dimensional iteration index in the [0, tripCount) interval
// into a multi-dimensional iteration coordinate.
static SmallVector<Value> delinearize(ImplicitLocOpBuilder &b, Value index,
                                      ArrayRef<Value> tripCounts) {
  SmallVector<Value> coords(tripCounts.size());
  assert(!tripCounts.empty() && "tripCounts must not be empty");

  for (ssize_t i = tripCounts.size() - 1; i >= 0; --i) {
    coords[i] = b.create<arith::RemSIOp>(index, tripCounts[i]);
    index = b.create<arith::DivSIOp>(index, tripCounts[i]);
  }

  return coords;
}
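
// Worked example (illustrative): with tripCounts = [50, 50] and index = 127
// the loop computes coords[1] = 127 % 50 = 27, index = 127 / 50 = 2, then
// coords[0] = 2 % 50 = 2, so the flat index 127 maps to coordinate (2, 27).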

// Returns the function type and implicit captures for a parallel compute
// function. We need the list of implicit captures to set up the block and
// value mapping when we clone the body of the parallel operation.
static ParallelComputeFunctionType
getParallelComputeFunctionType(scf::ParallelOp op, PatternRewriter &rewriter) {
  // Values implicitly captured by the parallel operation.
  llvm::SetVector<Value> captures;
  getUsedValuesDefinedAbove(op.getRegion(), op.getRegion(), captures);

  SmallVector<Type> inputs;
  inputs.reserve(2 + 4 * op.getNumLoops() + captures.size());

  Type indexTy = rewriter.getIndexType();

  // One-dimensional iteration space defined by the block index and size.
  inputs.push_back(indexTy); // blockIndex
  inputs.push_back(indexTy); // blockSize

  // Multi-dimensional parallel iteration space defined by the loop trip
  // counts.
  for (unsigned i = 0; i < op.getNumLoops(); ++i)
    inputs.push_back(indexTy); // loop tripCount

  // Parallel operation lower bound, upper bound and step. At call sites the
  // lower bounds, upper bounds and steps are passed as contiguous arguments:
  //   call @compute(%lb0, %lb1, ..., %ub0, %ub1, ..., %step0, %step1, ...)
  for (unsigned i = 0; i < op.getNumLoops(); ++i) {
    inputs.push_back(indexTy); // lower bound
    inputs.push_back(indexTy); // upper bound
    inputs.push_back(indexTy); // step
  }

  // Types of the implicit captures.
  for (Value capture : captures)
    inputs.push_back(capture.getType());

  // Convert captures to a vector for later convenience.
  SmallVector<Value> capturesVector(captures.begin(), captures.end());
  return {rewriter.getFunctionType(inputs, TypeRange()), capturesVector};
}
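
// For example (illustrative): a two-loop scf.parallel that implicitly captures
// one memref<?xf32> value gets the compute function type
//   (index, index,        // blockIndex, blockSize
//    index, index,        // loop trip counts
//    index, index, index, // per-loop lower bound, upper bound and step
//    index, index, index,
//    memref<?xf32>) -> ()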

// Creates a parallel compute function from the parallel operation.
static ParallelComputeFunction createParallelComputeFunction(
    scf::ParallelOp op, const ParallelComputeFunctionBounds &bounds,
    unsigned numBlockAlignedInnerLoops, PatternRewriter &rewriter) {
  OpBuilder::InsertionGuard guard(rewriter);
  ImplicitLocOpBuilder b(op.getLoc(), rewriter);

  ModuleOp module = op->getParentOfType<ModuleOp>();

  ParallelComputeFunctionType computeFuncType =
      getParallelComputeFunctionType(op, rewriter);

  FunctionType type = computeFuncType.type;
  func::FuncOp func = func::FuncOp::create(
      op.getLoc(),
      numBlockAlignedInnerLoops > 0 ? "parallel_compute_fn_with_aligned_loops"
                                    : "parallel_compute_fn",
      type);
  func.setPrivate();

  // Insert the function into the module symbol table and assign it a unique
  // name.
  SymbolTable symbolTable(module);
  symbolTable.insert(func);
  rewriter.getListener()->notifyOperationInserted(func);

  // Create the function entry block.
  Block *block =
      b.createBlock(&func.getBody(), func.begin(), type.getInputs(),
                    SmallVector<Location>(type.getNumInputs(), op.getLoc()));
  b.setInsertionPointToEnd(block);

  ParallelComputeFunctionArgs args = {op.getNumLoops(), func.getArguments()};

  // Block iteration position defined by the block index and size.
  BlockArgument blockIndex = args.blockIndex();
  BlockArgument blockSize = args.blockSize();

  // Constants used below.
  Value c0 = b.create<arith::ConstantIndexOp>(0);
  Value c1 = b.create<arith::ConstantIndexOp>(1);

  // Materialize known constants as constant operations in the function body.
  auto values = [&](ArrayRef<BlockArgument> args, ArrayRef<IntegerAttr> attrs) {
    return llvm::to_vector(
        llvm::map_range(llvm::zip(args, attrs), [&](auto tuple) -> Value {
          if (IntegerAttr attr = std::get<1>(tuple))
            return b.create<arith::ConstantOp>(attr);
          return std::get<0>(tuple);
        }));
  };

  // Multi-dimensional parallel iteration space defined by the loop trip
  // counts.
  auto tripCounts = values(args.tripCounts(), bounds.tripCounts);

  // Parallel operation lower bounds and steps.
  auto lowerBounds = values(args.lowerBounds(), bounds.lowerBounds);
  auto steps = values(args.steps(), bounds.steps);

  // Remaining arguments are implicit captures of the parallel operation.
  ArrayRef<BlockArgument> captures = args.captures();

  // Compute a product of trip counts to get the size of the flattened
  // one-dimensional iteration space.
  Value tripCount = tripCounts[0];
  for (unsigned i = 1; i < tripCounts.size(); ++i)
    tripCount = b.create<arith::MulIOp>(tripCount, tripCounts[i]);

  // Find the one-dimensional iteration bounds [blockFirstIndex,
  // blockLastIndex]:
  //   blockFirstIndex = blockIndex * blockSize
  Value blockFirstIndex = b.create<arith::MulIOp>(blockIndex, blockSize);

  // The last one-dimensional index in the block defined by `blockIndex`:
  //   blockLastIndex = min(blockFirstIndex + blockSize, tripCount) - 1
  Value blockEnd0 = b.create<arith::AddIOp>(blockFirstIndex, blockSize);
  Value blockEnd1 = b.create<arith::MinSIOp>(blockEnd0, tripCount);
  Value blockLastIndex = b.create<arith::SubIOp>(blockEnd1, c1);
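
  // Worked example (illustrative): with tripCount = 100, blockSize = 8 and
  // blockIndex = 3: blockFirstIndex = 24 and blockLastIndex =
  // min(24 + 8, 100) - 1 = 31, so this block covers flat indices [24, 31].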

  // Convert one-dimensional indices to multi-dimensional coordinates.
  auto blockFirstCoord = delinearize(b, blockFirstIndex, tripCounts);
  auto blockLastCoord = delinearize(b, blockLastIndex, tripCounts);

  // Compute the loop upper bounds derived from the block last coordinates:
  //   blockEndCoord[i] = blockLastCoord[i] + 1
  //
  // Block first and last coordinates can be the same along the outer compute
  // dimension when the inner compute dimension contains multiple blocks.
  SmallVector<Value> blockEndCoord(op.getNumLoops());
  for (size_t i = 0; i < blockLastCoord.size(); ++i)
    blockEndCoord[i] = b.create<arith::AddIOp>(blockLastCoord[i], c1);

  // Construct a loop nest out of scf.for operations that will iterate over
  // all coordinates in the [blockFirstCoord, blockLastCoord] range.
  using LoopBodyBuilder =
      std::function<void(OpBuilder &, Location, Value, ValueRange)>;
  using LoopNestBuilder = std::function<LoopBodyBuilder(size_t loopIdx)>;

  // Parallel region induction variables computed from the multi-dimensional
  // iteration coordinate using the parallel operation bounds and steps:
  //
  //   computeBlockInductionVars[loopIdx] =
  //       lowerBound[loopIdx] + blockCoord[loopIdx] * step[loopIdx]
  SmallVector<Value> computeBlockInductionVars(op.getNumLoops());

  // We need to know if we are in the first or last iteration of the
  // multi-dimensional loop for each loop in the nest, so we can decide which
  // loop bounds to use for the nested loops: bounds defined by the compute
  // block interval, or bounds defined by the parallel operation.
  //
  // Example: 2d parallel operation
  //                  i   j
  //   loop sizes:  [50, 50]
  //   first coord: [25, 25]
  //   last coord:  [30, 30]
  //
  // If `i` is equal to 25 then the iteration over `j` should start at 25; when
  // `i` is between 25 and 30 it should start at 0. The upper bound for `j`
  // should be 50, except when `i` is equal to 30, then it should also be 30.
  //
  // The value at the ith position specifies if all loops in the [0, i) range
  // of the loop nest are in their first/last iteration.
  SmallVector<Value> isBlockFirstCoord(op.getNumLoops());
  SmallVector<Value> isBlockLastCoord(op.getNumLoops());

  // Builds the inner loop nest inside the async.execute operation that does
  // all the work concurrently.
  LoopNestBuilder workLoopBuilder = [&](size_t loopIdx) -> LoopBodyBuilder {
    return [&, loopIdx](OpBuilder &nestedBuilder, Location loc, Value iv,
                        ValueRange args) {
      ImplicitLocOpBuilder b(loc, nestedBuilder);

      // Compute the induction variable for `loopIdx`.
      computeBlockInductionVars[loopIdx] = b.create<arith::AddIOp>(
          lowerBounds[loopIdx], b.create<arith::MulIOp>(iv, steps[loopIdx]));

      // Check if we are inside the first or last iteration of the loop.
      isBlockFirstCoord[loopIdx] = b.create<arith::CmpIOp>(
          arith::CmpIPredicate::eq, iv, blockFirstCoord[loopIdx]);
      isBlockLastCoord[loopIdx] = b.create<arith::CmpIOp>(
          arith::CmpIPredicate::eq, iv, blockLastCoord[loopIdx]);

      // Check if the previous loop is in its first or last iteration.
      if (loopIdx > 0) {
        isBlockFirstCoord[loopIdx] = b.create<arith::AndIOp>(
            isBlockFirstCoord[loopIdx], isBlockFirstCoord[loopIdx - 1]);
        isBlockLastCoord[loopIdx] = b.create<arith::AndIOp>(
            isBlockLastCoord[loopIdx], isBlockLastCoord[loopIdx - 1]);
      }

      // Keep building the loop nest.
      if (loopIdx < op.getNumLoops() - 1) {
        if (loopIdx + 1 >= op.getNumLoops() - numBlockAlignedInnerLoops) {
          // For block-aligned loops we always iterate starting from 0 up to
          // the loop trip counts.
          b.create<scf::ForOp>(c0, tripCounts[loopIdx + 1], c1, ValueRange(),
                               workLoopBuilder(loopIdx + 1));

        } else {
          // Select the nested loop lower/upper bounds depending on our
          // position in the multi-dimensional iteration space.
          auto lb = b.create<arith::SelectOp>(isBlockFirstCoord[loopIdx],
                                              blockFirstCoord[loopIdx + 1], c0);

          auto ub = b.create<arith::SelectOp>(isBlockLastCoord[loopIdx],
                                              blockEndCoord[loopIdx + 1],
                                              tripCounts[loopIdx + 1]);

          b.create<scf::ForOp>(lb, ub, c1, ValueRange(),
                               workLoopBuilder(loopIdx + 1));
        }

        b.create<scf::YieldOp>(loc);
        return;
      }

      // Copy the body of the parallel op into the inner-most loop.
      IRMapping mapping;
      mapping.map(op.getInductionVars(), computeBlockInductionVars);
      mapping.map(computeFuncType.captures, captures);

      for (auto &bodyOp : op.getLoopBody().getOps())
        b.clone(bodyOp, mapping);
    };
  };

  b.create<scf::ForOp>(blockFirstCoord[0], blockEndCoord[0], c1, ValueRange(),
                       workLoopBuilder(0));
  b.create<func::ReturnOp>(ValueRange());

  return {op.getNumLoops(), func, std::move(computeFuncType.captures)};
}

// Creates a recursive async dispatch function for the given parallel compute
// function. The dispatch function keeps splitting the block range into halves
// until it reaches a single block, and then executes it inline.
//
// Function pseudocode (mix of C++ and MLIR):
//
//   func @async_dispatch(%block_start : index, %block_end : index, ...) {
//
//     // Keep splitting the block range until we reach a range of size 1.
//     while (%block_end - %block_start > 1) {
//       %mid_index = %block_start + (%block_end - %block_start) / 2;
//       async.execute { call @async_dispatch(%mid_index, %block_end); }
//       %block_end = %mid_index
//     }
//
//     // Call the parallel compute function for a single block.
//     call @parallel_compute_fn(%block_start, %block_size, ...);
//   }
//
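// Illustrative trace (assumed, not generated verbatim): dispatching the range
// [0, 8) spawns async dispatches of [4, 8), [2, 4) and [1, 2), then executes
// block 0 inline; every spawned dispatch recursively splits its own subrange
// the same way until each block runs exactly once.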
static func::FuncOp
createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
                            PatternRewriter &rewriter) {
  OpBuilder::InsertionGuard guard(rewriter);
  Location loc = computeFunc.func.getLoc();
  ImplicitLocOpBuilder b(loc, rewriter);

  ModuleOp module = computeFunc.func->getParentOfType<ModuleOp>();

  ArrayRef<Type> computeFuncInputTypes =
      computeFunc.func.getFunctionType().getInputs();

  // Compared to the parallel compute function, the async dispatch function
  // takes an additional !async.group argument. Also, instead of a single
  // `blockIndex` it takes `blockStart` and `blockEnd` arguments to define the
  // range of dispatched blocks.
  SmallVector<Type> inputTypes;
  inputTypes.push_back(async::GroupType::get(rewriter.getContext()));
  inputTypes.push_back(rewriter.getIndexType()); // add blockStart argument
  inputTypes.append(computeFuncInputTypes.begin(), computeFuncInputTypes.end());

  FunctionType type = rewriter.getFunctionType(inputTypes, TypeRange());
  func::FuncOp func = func::FuncOp::create(loc, "async_dispatch_fn", type);
  func.setPrivate();

  // Insert the function into the module symbol table and assign it a unique
  // name.
  SymbolTable symbolTable(module);
  symbolTable.insert(func);
  rewriter.getListener()->notifyOperationInserted(func);

  // Create the function entry block.
  Block *block = b.createBlock(&func.getBody(), func.begin(), type.getInputs(),
                               SmallVector<Location>(type.getNumInputs(), loc));
  b.setInsertionPointToEnd(block);

  Type indexTy = b.getIndexType();
  Value c1 = b.create<arith::ConstantIndexOp>(1);
  Value c2 = b.create<arith::ConstantIndexOp>(2);

  // Get the async group that will track async dispatch completion.
  Value group = block->getArgument(0);

  // Get the block iteration range: [blockStart, blockEnd)
  Value blockStart = block->getArgument(1);
  Value blockEnd = block->getArgument(2);

  // Create a work-splitting while loop for the [blockStart, blockEnd) range.
  SmallVector<Type> types = {indexTy, indexTy};
  SmallVector<Value> operands = {blockStart, blockEnd};
  SmallVector<Location> locations = {loc, loc};

  // Create a recursive dispatch loop.
  scf::WhileOp whileOp = b.create<scf::WhileOp>(types, operands);
  Block *before = b.createBlock(&whileOp.getBefore(), {}, types, locations);
  Block *after = b.createBlock(&whileOp.getAfter(), {}, types, locations);

  // Set up the dispatch loop condition block: decide if we need to go into
  // the `after` block and launch one more async dispatch.
  {
    b.setInsertionPointToEnd(before);
    Value start = before->getArgument(0);
    Value end = before->getArgument(1);
    Value distance = b.create<arith::SubIOp>(end, start);
    Value dispatch =
        b.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, distance, c1);
    b.create<scf::ConditionOp>(dispatch, before->getArguments());
  }

  // Set up the async dispatch loop body: recursively call the dispatch
  // function for the second half of the original range and go to the next
  // iteration.
  {
    b.setInsertionPointToEnd(after);
    Value start = after->getArgument(0);
    Value end = after->getArgument(1);
    Value distance = b.create<arith::SubIOp>(end, start);
    Value halfDistance = b.create<arith::DivSIOp>(distance, c2);
    Value midIndex = b.create<arith::AddIOp>(start, halfDistance);

    // Call the dispatch function recursively inside the async.execute region.
    auto executeBodyBuilder = [&](OpBuilder &executeBuilder,
                                  Location executeLoc, ValueRange executeArgs) {
      // Update the original `blockStart` and `blockEnd` with the new range.
      SmallVector<Value> operands{block->getArguments().begin(),
                                  block->getArguments().end()};
      operands[1] = midIndex;
      operands[2] = end;

      executeBuilder.create<func::CallOp>(executeLoc, func.getSymName(),
                                          func.getCallableResults(), operands);
      executeBuilder.create<async::YieldOp>(executeLoc, ValueRange());
    };

    // Create an async.execute operation to dispatch half of the block range.
    auto execute = b.create<ExecuteOp>(TypeRange(), ValueRange(), ValueRange(),
                                       executeBodyBuilder);
    b.create<AddToGroupOp>(indexTy, execute.getToken(), group);
    b.create<scf::YieldOp>(ValueRange({start, midIndex}));
  }

  // After dispatching async operations to process the tail of the block range,
  // call the parallel compute function for the first block of the range.
  b.setInsertionPointAfter(whileOp);

  // Drop async dispatch specific arguments: async group, block start and end.
  auto forwardedInputs = block->getArguments().drop_front(3);
  SmallVector<Value> computeFuncOperands = {blockStart};
  computeFuncOperands.append(forwardedInputs.begin(), forwardedInputs.end());

  b.create<func::CallOp>(computeFunc.func.getSymName(),
                         computeFunc.func.getCallableResults(),
                         computeFuncOperands);
  b.create<func::ReturnOp>(ValueRange());

  return func;
}

// Launch async dispatch of the parallel compute function.
static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
                            ParallelComputeFunction &parallelComputeFunction,
                            scf::ParallelOp op, Value blockSize,
                            Value blockCount,
                            const SmallVector<Value> &tripCounts) {
  MLIRContext *ctx = op->getContext();

  // Add one more level of indirection to dispatch parallel compute functions
  // using async operations and recursive work splitting.
  func::FuncOp asyncDispatchFunction =
      createAsyncDispatchFunction(parallelComputeFunction, rewriter);

  Value c0 = b.create<arith::ConstantIndexOp>(0);
  Value c1 = b.create<arith::ConstantIndexOp>(1);

  // Appends operands shared by the async dispatch and parallel compute
  // functions to the given operands vector.
  auto appendBlockComputeOperands = [&](SmallVector<Value> &operands) {
    operands.append(tripCounts);
    operands.append(op.getLowerBound().begin(), op.getLowerBound().end());
    operands.append(op.getUpperBound().begin(), op.getUpperBound().end());
    operands.append(op.getStep().begin(), op.getStep().end());
    operands.append(parallelComputeFunction.captures);
  };

  // Check if the block count is one; in this case we can skip the async
  // dispatch completely. If this is known statically, then canonicalization
  // will erase async group operations.
  Value isSingleBlock =
      b.create<arith::CmpIOp>(arith::CmpIPredicate::eq, blockCount, c1);

  auto syncDispatch = [&](OpBuilder &nestedBuilder, Location loc) {
    ImplicitLocOpBuilder b(loc, nestedBuilder);

    // Call the parallel compute function for the single block.
    SmallVector<Value> operands = {c0, blockSize};
    appendBlockComputeOperands(operands);

    b.create<func::CallOp>(parallelComputeFunction.func.getSymName(),
                           parallelComputeFunction.func.getCallableResults(),
                           operands);
    b.create<scf::YieldOp>();
  };

  auto asyncDispatch = [&](OpBuilder &nestedBuilder, Location loc) {
    ImplicitLocOpBuilder b(loc, nestedBuilder);

    // Create an async.group to wait on all async tokens from the concurrent
    // execution of multiple parallel compute functions. The first block will
    // be executed synchronously in the caller thread.
    Value groupSize = b.create<arith::SubIOp>(blockCount, c1);
    Value group = b.create<CreateGroupOp>(GroupType::get(ctx), groupSize);

    // Launch the async dispatch function for the [0, blockCount) range.
    SmallVector<Value> operands = {group, c0, blockCount, blockSize};
    appendBlockComputeOperands(operands);

    b.create<func::CallOp>(asyncDispatchFunction.getSymName(),
                           asyncDispatchFunction.getCallableResults(),
                           operands);

    // Wait for the completion of all parallel compute operations.
    b.create<AwaitAllOp>(group);

    b.create<scf::YieldOp>();
  };

  // Dispatch either the single block compute function, or launch the async
  // dispatch.
  b.create<scf::IfOp>(isSingleBlock, syncDispatch, asyncDispatch);
}

// Dispatch parallel compute functions by submitting all async compute tasks
// from a simple for loop in the caller thread.
static void
doSequentialDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
                     ParallelComputeFunction &parallelComputeFunction,
                     scf::ParallelOp op, Value blockSize, Value blockCount,
                     const SmallVector<Value> &tripCounts) {
  MLIRContext *ctx = op->getContext();

  func::FuncOp compute = parallelComputeFunction.func;

  Value c0 = b.create<arith::ConstantIndexOp>(0);
  Value c1 = b.create<arith::ConstantIndexOp>(1);

  // Create an async.group to wait on all async tokens from the concurrent
  // execution of multiple parallel compute functions. The first block will be
  // executed synchronously in the caller thread.
  Value groupSize = b.create<arith::SubIOp>(blockCount, c1);
  Value group = b.create<CreateGroupOp>(GroupType::get(ctx), groupSize);

  // Call the parallel compute function for all blocks.
  using LoopBodyBuilder =
      std::function<void(OpBuilder &, Location, Value, ValueRange)>;

  // Returns parallel compute function operands to process the given block.
  auto computeFuncOperands = [&](Value blockIndex) -> SmallVector<Value> {
    SmallVector<Value> computeFuncOperands = {blockIndex, blockSize};
    computeFuncOperands.append(tripCounts);
    computeFuncOperands.append(op.getLowerBound().begin(),
                               op.getLowerBound().end());
    computeFuncOperands.append(op.getUpperBound().begin(),
                               op.getUpperBound().end());
    computeFuncOperands.append(op.getStep().begin(), op.getStep().end());
    computeFuncOperands.append(parallelComputeFunction.captures);
    return computeFuncOperands;
  };

  // The induction variable is the index of the block: [0, blockCount).
  LoopBodyBuilder loopBuilder = [&](OpBuilder &loopBuilder, Location loc,
                                    Value iv, ValueRange args) {
    ImplicitLocOpBuilder b(loc, loopBuilder);

    // Call the parallel compute function inside the async.execute region.
    auto executeBodyBuilder = [&](OpBuilder &executeBuilder,
                                  Location executeLoc, ValueRange executeArgs) {
      executeBuilder.create<func::CallOp>(executeLoc, compute.getSymName(),
                                          compute.getCallableResults(),
                                          computeFuncOperands(iv));
      executeBuilder.create<async::YieldOp>(executeLoc, ValueRange());
    };

    // Create an async.execute operation to launch the parallel compute
    // function.
    auto execute = b.create<ExecuteOp>(TypeRange(), ValueRange(), ValueRange(),
                                       executeBodyBuilder);
    b.create<AddToGroupOp>(rewriter.getIndexType(), execute.getToken(), group);
    b.create<scf::YieldOp>();
  };

  // Iterate over all compute blocks and launch parallel compute operations.
  b.create<scf::ForOp>(c1, blockCount, c1, ValueRange(), loopBuilder);

  // Call the parallel compute function for the first block in the caller
  // thread.
  b.create<func::CallOp>(compute.getSymName(), compute.getCallableResults(),
                         computeFuncOperands(c0));

  // Wait for the completion of all async compute operations.
  b.create<AwaitAllOp>(group);
}

LogicalResult
AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
                                         PatternRewriter &rewriter) const {
  // We do not currently support the rewrite for parallel ops with reductions.
  if (op.getNumReductions() != 0)
    return failure();

  ImplicitLocOpBuilder b(op.getLoc(), rewriter);

  // Computing minTaskSize emits IR and can be implemented as executing a cost
  // model on the body of the scf.parallel. Thus it needs to be computed before
  // the body of the scf.parallel has been manipulated.
  Value minTaskSize = computeMinTaskSize(b, op);

  // Make sure that all constants will be inside the parallel operation body to
  // reduce the number of parallel compute function arguments.
  cloneConstantsIntoTheRegion(op.getLoopBody(), rewriter);

  // Compute the trip count for each loop induction variable:
  //   tripCount = ceil_div(upperBound - lowerBound, step);
  SmallVector<Value> tripCounts(op.getNumLoops());
  for (size_t i = 0; i < op.getNumLoops(); ++i) {
    auto lb = op.getLowerBound()[i];
    auto ub = op.getUpperBound()[i];
    auto step = op.getStep()[i];
    auto range = b.createOrFold<arith::SubIOp>(ub, lb);
    tripCounts[i] = b.createOrFold<arith::CeilDivSIOp>(range, step);
  }
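
  // For example (illustrative): a loop with lb = 0, ub = 100 and step = 3 has
  // tripCount = ceil_div(100 - 0, 3) = 34 iterations.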

  // Compute a product of trip counts to get the 1-dimensional iteration space
  // for the scf.parallel operation.
  Value tripCount = tripCounts[0];
  for (size_t i = 1; i < tripCounts.size(); ++i)
    tripCount = b.create<arith::MulIOp>(tripCount, tripCounts[i]);

  // Short-circuit no-op parallel loops (zero iterations) that can arise from
  // memrefs with dynamic dimension(s) equal to zero.
  Value c0 = b.create<arith::ConstantIndexOp>(0);
  Value isZeroIterations =
      b.create<arith::CmpIOp>(arith::CmpIPredicate::eq, tripCount, c0);

  // Do absolutely nothing if the trip count is zero.
  auto noOp = [&](OpBuilder &nestedBuilder, Location loc) {
    nestedBuilder.create<scf::YieldOp>(loc);
  };

  // Compute the parallel block size and dispatch concurrent tasks computing
  // results for each block.
  auto dispatch = [&](OpBuilder &nestedBuilder, Location loc) {
    ImplicitLocOpBuilder b(loc, nestedBuilder);

    // Collect statically known constants defining the loop nest in the
    // parallel compute function. LLVM can't always push constants across the
    // non-trivial async dispatch call graph; by providing these values
    // explicitly we can build a more efficient loop nest and rely on better
    // constant folding, loop unrolling and vectorization.
    ParallelComputeFunctionBounds staticBounds = {
        integerConstants(tripCounts),
        integerConstants(op.getLowerBound()),
        integerConstants(op.getUpperBound()),
        integerConstants(op.getStep()),
    };

    // Find how many inner iteration dimensions are statically known and have
    // a product smaller than `512`. We align the parallel compute block size
    // by the product of the statically known dimensions, so that we can
    // guarantee that the inner loops execute from 0 to the loop trip counts,
    // elide dynamic loop boundaries, and give LLVM an opportunity to unroll
    // the loops. The constant `512` is arbitrary; it should depend on how
    // many iterations LLVM will typically decide to unroll.
    static constexpr int64_t maxUnrollableIterations = 512;

    // The number of inner loops with a statically known number of iterations
    // less than the `maxUnrollableIterations` value.
    int numUnrollableLoops = 0;

    auto getInt = [](IntegerAttr attr) { return attr ? attr.getInt() : 0; };

    SmallVector<int64_t> numIterations(op.getNumLoops());
    numIterations.back() = getInt(staticBounds.tripCounts.back());

    for (int i = op.getNumLoops() - 2; i >= 0; --i) {
      int64_t tripCount = getInt(staticBounds.tripCounts[i]);
      int64_t innerIterations = numIterations[i + 1];
      numIterations[i] = tripCount * innerIterations;

      // Update the number of inner loops that we can potentially unroll.
      if (innerIterations > 0 && innerIterations <= maxUnrollableIterations)
        numUnrollableLoops++;
    }
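
    // Worked example (illustrative): for static trip counts [128, 16, 4] the
    // suffix products are numIterations = [8192, 64, 4]; the two inner
    // products 4 and 64 are <= 512, so numUnrollableLoops = 2.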

    Value numWorkerThreadsVal;
    if (numWorkerThreads >= 0)
      numWorkerThreadsVal = b.create<arith::ConstantIndexOp>(numWorkerThreads);
    else
      numWorkerThreadsVal = b.create<async::RuntimeNumWorkerThreadsOp>();

    // With a large number of threads the value of creating many compute
    // blocks is reduced because the problem typically becomes memory bound.
    // For this reason we scale the number of workers using an equivalent of
    // the following logic:
    //   float overshardingFactor = numWorkerThreads <= 4  ? 8.0
    //                            : numWorkerThreads <= 8  ? 4.0
    //                            : numWorkerThreads <= 16 ? 2.0
    //                            : numWorkerThreads <= 32 ? 1.0
    //                            : numWorkerThreads <= 64 ? 0.8
    //                                                     : 0.6;

    // Pairs of the exclusive lower end of a bracket and the factor that the
    // number of workers is scaled with when it falls into that bracket.
    const SmallVector<std::pair<int, float>> overshardingBrackets = {
        {4, 4.0f}, {8, 2.0f}, {16, 1.0f}, {32, 0.8f}, {64, 0.6f}};
    const float initialOvershardingFactor = 8.0f;

    Value scalingFactor = b.create<arith::ConstantFloatOp>(
        llvm::APFloat(initialOvershardingFactor), b.getF32Type());
    for (const std::pair<int, float> &p : overshardingBrackets) {
      Value bracketBegin = b.create<arith::ConstantIndexOp>(p.first);
      Value inBracket = b.create<arith::CmpIOp>(
          arith::CmpIPredicate::sgt, numWorkerThreadsVal, bracketBegin);
      Value bracketScalingFactor = b.create<arith::ConstantFloatOp>(
          llvm::APFloat(p.second), b.getF32Type());
      scalingFactor = b.create<arith::SelectOp>(inBracket, bracketScalingFactor,
                                                scalingFactor);
    }
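
    // For example (illustrative): with 16 worker threads the comparisons
    // 16 > 4 and 16 > 8 succeed but 16 > 16 does not, so scalingFactor
    // becomes 2.0 and the maximum number of compute blocks below is 32.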
    Value numWorkersIndex =
        b.create<arith::IndexCastOp>(b.getI32Type(), numWorkerThreadsVal);
    Value numWorkersFloat =
        b.create<arith::SIToFPOp>(b.getF32Type(), numWorkersIndex);
    Value scaledNumWorkers =
        b.create<arith::MulFOp>(scalingFactor, numWorkersFloat);
    Value scaledNumInt =
        b.create<arith::FPToSIOp>(b.getI32Type(), scaledNumWorkers);
    Value scaledWorkers =
        b.create<arith::IndexCastOp>(b.getIndexType(), scaledNumInt);

    Value maxComputeBlocks = b.create<arith::MaxSIOp>(
        b.create<arith::ConstantIndexOp>(1), scaledWorkers);

    // Compute the parallel block size from the parallel problem size:
    //   blockSize = min(tripCount,
    //                   max(ceil_div(tripCount, maxComputeBlocks),
    //                       minTaskSize))
    Value bs0 = b.create<arith::CeilDivSIOp>(tripCount, maxComputeBlocks);
    Value bs1 = b.create<arith::MaxSIOp>(bs0, minTaskSize);
    Value blockSize = b.create<arith::MinSIOp>(tripCount, bs1);
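
    // Worked example (illustrative): with tripCount = 1000,
    // maxComputeBlocks = 32 and minTaskSize = 64: bs0 = ceil_div(1000, 32) =
    // 32, bs1 = max(32, 64) = 64 and blockSize = min(1000, 64) = 64, i.e. the
    // work is split into 16 compute blocks (the last one smaller).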

    // Dispatch the parallel compute function using async recursive work
    // splitting, or by submitting compute tasks sequentially from the caller
    // thread.
    auto doDispatch = asyncDispatch ? doAsyncDispatch : doSequentialDispatch;

    // Create a parallel compute function that takes a block id and computes
    // the parallel operation body for a subset of the iteration space.

    // Compute the number of parallel compute blocks.
    Value blockCount = b.create<arith::CeilDivSIOp>(tripCount, blockSize);

    // Dispatch the parallel compute function without hints to unroll the
    // inner loops.
    auto dispatchDefault = [&](OpBuilder &nestedBuilder, Location loc) {
      ParallelComputeFunction compute =
          createParallelComputeFunction(op, staticBounds, 0, rewriter);

      ImplicitLocOpBuilder b(loc, nestedBuilder);
      doDispatch(b, rewriter, compute, op, blockSize, blockCount, tripCounts);
      b.create<scf::YieldOp>();
    };

    // Dispatch the parallel compute function with hints for unrolling the
    // inner loops.
    auto dispatchBlockAligned = [&](OpBuilder &nestedBuilder, Location loc) {
      ParallelComputeFunction compute = createParallelComputeFunction(
          op, staticBounds, numUnrollableLoops, rewriter);

      ImplicitLocOpBuilder b(loc, nestedBuilder);
      // Align the block size to be a multiple of the statically known
      // number of iterations in the inner loops.
      Value numIters = b.create<arith::ConstantIndexOp>(
          numIterations[op.getNumLoops() - numUnrollableLoops]);
      Value alignedBlockSize = b.create<arith::MulIOp>(
          b.create<arith::CeilDivSIOp>(blockSize, numIters), numIters);
      doDispatch(b, rewriter, compute, op, alignedBlockSize, blockCount,
                 tripCounts);
      b.create<scf::YieldOp>();
    };
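
    // For example (illustrative): with blockSize = 64 and numIters = 48
    // statically known iterations in the unrollable inner loops,
    // alignedBlockSize = ceil_div(64, 48) * 48 = 96, so every dispatched
    // block covers a whole number of inner loop nests.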

    // Dispatch to the block-aligned compute function only if the computed
    // block size is larger than the number of iterations in the unrollable
    // inner loops, because otherwise it can reduce the available parallelism.
    if (numUnrollableLoops > 0) {
      Value numIters = b.create<arith::ConstantIndexOp>(
          numIterations[op.getNumLoops() - numUnrollableLoops]);
      Value useBlockAlignedComputeFn = b.create<arith::CmpIOp>(
          arith::CmpIPredicate::sge, blockSize, numIters);

      b.create<scf::IfOp>(useBlockAlignedComputeFn, dispatchBlockAligned,
                          dispatchDefault);
      b.create<scf::YieldOp>();
    } else {
      dispatchDefault(b, loc);
    }
  };

  // Replace the `scf.parallel` operation with the parallel compute function.
  b.create<scf::IfOp>(isZeroIterations, noOp, dispatch);

  // The parallel operation was replaced with a block iteration loop.
  rewriter.eraseOp(op);

  return success();
}

void AsyncParallelForPass::runOnOperation() {
  MLIRContext *ctx = &getContext();

  RewritePatternSet patterns(ctx);
  populateAsyncParallelForPatterns(
      patterns, asyncDispatch, numWorkerThreads,
      [&](ImplicitLocOpBuilder builder, scf::ParallelOp op) {
        return builder.create<arith::ConstantIndexOp>(minTaskSize);
      });
  if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
    signalPassFailure();
}

std::unique_ptr<Pass> mlir::createAsyncParallelForPass() {
  return std::make_unique<AsyncParallelForPass>();
}

std::unique_ptr<Pass> mlir::createAsyncParallelForPass(bool asyncDispatch,
                                                       int32_t numWorkerThreads,
                                                       int32_t minTaskSize) {
  return std::make_unique<AsyncParallelForPass>(asyncDispatch, numWorkerThreads,
                                                minTaskSize);
}

void mlir::async::populateAsyncParallelForPatterns(
    RewritePatternSet &patterns, bool asyncDispatch, int32_t numWorkerThreads,
    const AsyncMinTaskSizeComputationFunction &computeMinTaskSize) {
  MLIRContext *ctx = patterns.getContext();
  patterns.add<AsyncParallelForRewrite>(ctx, asyncDispatch, numWorkerThreads,
                                        computeMinTaskSize);
}