MLIR  15.0.0git
LinalgInterfaces.cpp
Go to the documentation of this file.
1 //===- LinalgInterfaces.cpp - Linalg interfaces implementation ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
10 
17 #include "mlir/IR/AffineMap.h"
18 #include "mlir/IR/TypeUtilities.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 
21 using namespace mlir;
22 using namespace mlir::linalg;
23 
24 /// Include the definitions of the copy operation interface.
25 #include "mlir/Dialect/Linalg/IR/LinalgInterfaces.cpp.inc"
26 
27 //===----------------------------------------------------------------------===//
28 // Interface utility functions
29 //===----------------------------------------------------------------------===//
31  linalg::LinalgOp linalgOp, ArrayRef<OpOperand *> droppedOperands) {
32  SmallVector<AffineMap> indexingMaps;
33  for (auto *opOperand : linalgOp.getInputAndOutputOperands()) {
34  if (llvm::is_contained(droppedOperands, opOperand))
35  continue;
36  indexingMaps.push_back(linalgOp.getTiedIndexingMap(opOperand));
37  }
38  return inversePermutation(concatAffineMaps(indexingMaps)) != AffineMap();
39 }
40 
41 //===----------------------------------------------------------------------===//
42 // ContractionOpInterface implementation
43 //===----------------------------------------------------------------------===//
44 
45 /// Return true if the use-def chain from `v` to `from` consists of 0 or more
46 /// unary single-operand operations.
47 // TODO: relax to multi-operands with constants, which are technically unary ops
48 // as needed (e.g. add5).
49 static bool isChainOfUnaryOpsFrom(Value v, Value from) {
50  while (true) {
51  if (v == from)
52  return true;
53  Operation *op = v.getDefiningOp();
54  if (!op || op->getNumOperands() != 1)
55  return false;
56  v = op->getOperand(0);
57  };
58 }
59 
60 /// Return the unique instance of OpType in `block` if it is indeed unique.
61 /// Return null if none or more than 1 instances exist.
62 template <typename OpType>
63 static OpType getSingleOpOfType(Block &block) {
64  OpType res = nullptr;
65  block.walk([&](OpType op) {
66  if (res) {
67  res = nullptr;
68  return WalkResult::interrupt();
69  }
70  res = op;
71  return WalkResult::advance();
72  });
73  return res;
74 }
75 
76 /// Detect whether res is any permutation of `u5(u1(c) + u2(u3(a) * u4(b)))`
77 /// on the field (AddOpType, MulOpType), where u1, u2, u3, u4 and u5 represent
78 /// unary operations that may change the type.
79 template <typename AddOpType, typename MulOpType>
80 static bool isAddMul(Block &block) {
81  if (block.getNumArguments() != 3)
82  return false;
83  Operation *yieldOp = block.getTerminator();
84  if (yieldOp->getNumOperands() != 1)
85  return false;
86 
87  AddOpType addOp = getSingleOpOfType<AddOpType>(block);
88  MulOpType mulOp = getSingleOpOfType<MulOpType>(block);
89  if (!addOp || !mulOp)
90  return false;
91 
92  Value argA = block.getArgument(0), argB = block.getArgument(1);
93  Value a = mulOp->getOperand(0), b = mulOp->getOperand(1);
94  Value mul = mulOp->getResult(0);
95  Value argC = block.getArgument(2);
96  Value c1 = addOp->getOperand(0), c2 = addOp->getOperand(1);
97  Value add = addOp->getResult(0);
98  Value res = yieldOp->getOperand(0);
99  // Result traces back to add.
100  auto un = isChainOfUnaryOpsFrom;
101  bool success = un(res, add);
102  // One of the operands of add traces back to argC, the other to the mul.
103  success |= (un(c1, argC) && un(c2, mul)) || ((un(c1, mul)) && un(c2, argC));
104  // One of the operands of mul traces back to argA, the other to argB.
105  success |= (un(a, argA) && un(b, argB)) || ((un(a, argB)) && un(b, argA));
106  return success;
107 }
108 
110  Success = 0,
111  NotLinalgOp,
113  NoReduction,
115  NotAddMul
116 };
118  auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
119  if (!linalgOp)
121  if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
123  auto mapRange = linalgOp.indexing_maps().getAsValueRange<AffineMapAttr>();
124  if (linalgOp.getNumReductionLoops() == 0)
126  if (llvm::any_of(mapRange,
127  [](AffineMap m) { return !m.isProjectedPermutation(); }))
129  // TODO: more fields than add/mul.
130  if (!isAddMul<arith::AddFOp, arith::MulFOp>(linalgOp->getRegion(0).front()) &&
131  !isAddMul<arith::AddIOp, arith::MulIOp>(linalgOp->getRegion(0).front()) &&
132  !isAddMul<complex::AddOp, complex::MulOp>(linalgOp->getRegion(0).front()))
135 }
136 
138  if (!linalgOp)
139  return false;
140  Operation *op = linalgOp.getOperation();
141  return isa<ContractionOpInterface>(op) ||
143 }
144 
145 /// Verify that a LinalgOp `op` is a contraction.
146 /// A Linalg contraction is defined in general terms:
147 /// 1. Has 2 input and 1 output shapes.
148 /// 2. Has at least one reduction dimension.
149 /// 3. Has only projected permutation indexing maps.
150 /// 4. its body computes `u5(u1(c) + u2(u3(a) * u4(b)))` on some field
151 /// (AddOpType, MulOpType), where u1, u2, u3, u4 and u5 represent scalar unary
152 /// operations that may change the type (e.g. for mixed-precision).
153 /// As a consequence, when vectorization of such an op occurs, the only special
154 /// behavior is that the (unique) MulOpType is vectorized into a
155 /// `vector.contract`. All other ops are handled in a generic fashion.
156 /// In the future, we may wish to allow more input arguments and elementwise and
157 /// constant operations that do not involve the reduction dimension(s).
159  auto res = isContractionInterfaceImpl(op);
161  return op->emitError("expected a LinalgOp");
163  return op->emitError("expected op with 2 inputs and 1 outputs");
165  return op->emitError("expected at least a reduction loop");
167  return op->emitError("expected all indexings to be projected permutations");
169  return op->emitError("(add, mul) operations not found");
170  return success();
171 }
172 
173 //===----------------------------------------------------------------------===//
174 // ConvolutionOpInterface implementation
175 //===----------------------------------------------------------------------===//
176 
177 /// Of the given two expressions returns one that is of type T (`lhs` gets
178 /// preference over `rhs`)
179 template <typename T>
181  return lhs.isa<T>() ? lhs.cast<T>()
182  : (rhs.isa<T>() ? rhs.cast<T>() : nullptr);
183 }
184 
185 namespace {
186 /// Walk the indexing expressions for input of a convolution operation to verify
187 /// its of the right form, either
188 /// - AffineDimExpr
189 /// - AffineDimExpr (`*` (AffineSymbolExpr | AffineConstantExpr))?
190 /// (`+` AffineDimExpr (`*` (AffineSymbolExpr | AffineConstantExpr))?)*
191 ///
192 /// classifies the AffineDimExpr as convolved dimensions or unconvolved
193 /// dimensions and verifies each dimension occurs only once.
194 struct ConvAccessExprWalker
195  : public AffineExprVisitor<ConvAccessExprWalker, LogicalResult> {
196  llvm::SmallDenseSet<unsigned> convolvedDims;
197  llvm::SmallDenseSet<unsigned> unConvolvedDims;
198 
199  LogicalResult visitDimExpr(AffineDimExpr dimExpr) {
200  unsigned position = dimExpr.getPosition();
201  if (unConvolvedDims.count(position) || convolvedDims.count(position)) {
202  return failure();
203  }
204  unConvolvedDims.insert(position);
205  return success();
206  }
207 
208  LogicalResult visitSymbolExpr(AffineSymbolExpr expr) { return failure(); }
209 
210  LogicalResult visitConstantExpr(AffineConstantExpr expr) { return failure(); }
211 
212  LogicalResult visitAffineBinaryOpExpr(AffineBinaryOpExpr binaryExpr) {
213  // In pre-order visit, top level op has to be an add op.
214  if (binaryExpr.getKind() != AffineExprKind::Add)
215  return failure();
216  return success(succeeded(isDimExprOrMulExpr(binaryExpr.getLHS())) &&
217  succeeded(isDimExprOrMulExpr(binaryExpr.getRHS())));
218  }
219 
220  LogicalResult isDimExprOrMulExpr(AffineExpr expr) {
221  if (auto dimExpr = expr.dyn_cast<AffineDimExpr>()) {
222  unsigned dim = dimExpr.getPosition();
223  if (convolvedDims.count(dim) || unConvolvedDims.count(dim))
224  return failure();
225  convolvedDims.insert(dim);
226  return success();
227  }
228  if (auto symbolMulExpr = expr.dyn_cast<AffineBinaryOpExpr>()) {
229  if (symbolMulExpr.getKind() != AffineExprKind::Mul)
230  return failure();
231  auto lhsExpr = symbolMulExpr.getLHS();
232  auto rhsExpr = symbolMulExpr.getRHS();
233  // Check for symbol expression.
234  AffineExpr mulExpr =
235  getAffineExprOfType<AffineSymbolExpr>(lhsExpr, rhsExpr);
236  // If there was no symbol expr, check for constant expression.
237  if (!mulExpr) {
238  mulExpr = getAffineExprOfType<AffineConstantExpr>(lhsExpr, rhsExpr);
239  }
240  auto dimExpr = getAffineExprOfType<AffineDimExpr>(lhsExpr, rhsExpr);
241  if (!mulExpr || !dimExpr)
242  return failure();
243  unsigned dim = dimExpr.getPosition();
244  if (convolvedDims.count(dim) || unConvolvedDims.count(dim))
245  return failure();
246  convolvedDims.insert(dim);
247  return success();
248  }
249  return failure();
250  }
251 };
252 } // namespace
253 
254 static llvm::SmallDenseSet<unsigned> getPreservedDims(AffineMap map) {
255  assert(map.isProjectedPermutation() &&
256  "expected map to have projected permutations");
257  llvm::SmallDenseSet<unsigned> preservedDims;
258  for (auto expr : map.getResults())
259  preservedDims.insert(expr.cast<AffineDimExpr>().getPosition());
260  return preservedDims;
261 }
262 
264  Success = 0,
265  NotLinalgOp,
272 };
273 
275  auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
276  if (!linalgOp)
278  if (linalgOp.getNumInputs() < 2 || linalgOp.getNumOutputs() != 1)
280 
281  auto indexingMaps = linalgOp.getIndexingMaps();
282 
283  // Check the input indexing map has the right form.
284  ConvAccessExprWalker inputExprWalker;
285  if (llvm::any_of(indexingMaps[0].getResults(),
286  [&inputExprWalker](AffineExpr expr) {
287  return failed(inputExprWalker.visit(expr));
288  })) {
290  }
291 
292  // Filter and output maps must be projected permutation.
293  if (!indexingMaps[1].isProjectedPermutation() ||
294  !indexingMaps.back().isProjectedPermutation())
296 
297  auto iteratorTypesRange =
298  linalgOp.iterator_types().getAsValueRange<StringAttr>();
299 
300  llvm::SmallDenseSet<unsigned> outputDims =
301  getPreservedDims(indexingMaps.back());
302  llvm::SmallDenseSet<unsigned> filterDims = getPreservedDims(indexingMaps[1]);
303  // Make sure all loops are characterized as one of:
304  // - Batch loop : present in output, as non-convolved in input, not present in
305  // filter.
306  // - Output image dimension : present in output, convolved dims in input, not
307  // present in filter.
308  // - Output channel dimension : present in output, not present in input,
309  // present in filter.
310  // - Filter loop dimension : present in filter, convolved in input, not
311  // present in output.
312  // - Input channel dimension : unconvolved in input, not present in output,
313  // present in filter.
314  // - Depth multiplier : unconvolved in input, present in output, present in
315  // filter.
316  llvm::SmallDenseSet<unsigned> allLoopDims;
317  for (auto outputExpr : indexingMaps.back().getResults()) {
318  unsigned outputDim = outputExpr.cast<AffineDimExpr>().getPosition();
319  if (inputExprWalker.unConvolvedDims.count(outputDim) &&
320  !filterDims.count(outputDim)) {
321  // Batch dimension.
322  if (*std::next(iteratorTypesRange.begin(), outputDim) !=
325  allLoopDims.insert(outputDim);
326  continue;
327  }
328  if (inputExprWalker.convolvedDims.count(outputDim) &&
329  !filterDims.count(outputDim)) {
330  // Output image Loop dimension.
331  if (*std::next(iteratorTypesRange.begin(), outputDim) !=
334  allLoopDims.insert(outputDim);
335  continue;
336  }
337  if (!inputExprWalker.convolvedDims.count(outputDim) &&
338  !inputExprWalker.unConvolvedDims.count(outputDim) &&
339  filterDims.count(outputDim)) {
340  // Output channel dimension.
341  if (*std::next(iteratorTypesRange.begin(), outputDim) !=
344  allLoopDims.insert(outputDim);
345  continue;
346  }
347  if (inputExprWalker.unConvolvedDims.count(outputDim) &&
348  filterDims.count(outputDim)) {
349  // Depth multiplier.
350  if (*std::next(iteratorTypesRange.begin(), outputDim) !=
353  allLoopDims.insert(outputDim);
354  continue;
355  }
357  }
358  for (auto filterExpr : indexingMaps[1].getResults()) {
359  unsigned filterDim = filterExpr.cast<AffineDimExpr>().getPosition();
360  if (outputDims.count(filterDim) &&
361  !inputExprWalker.unConvolvedDims.count(filterDim) &&
362  !inputExprWalker.convolvedDims.count(filterDim)) {
363  // Output channel dimension. This is already seen, continue;
364  continue;
365  }
366  if (inputExprWalker.convolvedDims.count(filterDim) &&
367  !outputDims.count(filterDim)) {
368  // Filter loop dimension.
369  if (*std::next(iteratorTypesRange.begin(), filterDim) !=
372  if (allLoopDims.count(filterDim))
374  allLoopDims.insert(filterDim);
375  continue;
376  }
377  if (inputExprWalker.unConvolvedDims.count(filterDim) &&
378  !outputDims.count(filterDim)) {
379  // Input channel dimension.
380  if (*std::next(iteratorTypesRange.begin(), filterDim) !=
383  if (allLoopDims.count(filterDim))
385  allLoopDims.insert(filterDim);
386  continue;
387  }
388  if (inputExprWalker.unConvolvedDims.count(filterDim) &&
389  outputDims.count(filterDim)) {
390  // Depthwise loop. Already seen.
391  continue;
392  }
394  }
395  // All loops must be covered now.
396  if (allLoopDims.size() != linalgOp.getNumLoops())
398 
400 }
401 
403  auto res = isConvolutionInterfaceImpl(op);
405  return op->emitError("expected a LinalgOp");
407  return op->emitError("expected op with 2 inputs and 1 output");
409  return op->emitError("unexpected input index map for convolutions");
411  return op->emitError(
412  "expected output/filter indexing maps to be projected permutations");
413  }
415  return op->emitError("unexpected loop dimension for convolution op");
416  }
418  return op->emitError(
419  "expected all iterators used to access outputs to be parallel");
420  }
422  return op->emitError(
423  "expected all iterators not used to access outputs to be reduction");
424  }
425  return success();
426 }
427 
428 //===----------------------------------------------------------------------===//
429 // FillOpInterface implementation
430 //===----------------------------------------------------------------------===//
431 
432 enum class MatchFillResult {
433  Success = 0,
434  NotLinalgOp,
437 };
438 
440  auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
441  if (!linalgOp)
443  if (linalgOp.getNumInputs() != 1 || linalgOp.getNumOutputs() != 1)
445 
446  OpOperand *value = linalgOp.getInputOperand(0);
447  if (!linalgOp.isScalar(value))
449 
451 }
452 
454  auto res = isFillInterfaceImpl(op);
455  if (res == MatchFillResult::NotLinalgOp)
456  return op->emitError("expected a LinalgOp");
458  return op->emitError("expected op with 1 input and 1 output");
460  return op->emitError("expected op with scalar input");
461 
462  return success();
463 }
464 
465 //===----------------------------------------------------------------------===//
466 // StructuredOpInterface implementation
467 //===----------------------------------------------------------------------===//
468 
469 OpOperandVector::operator SmallVector<Value>() {
470  SmallVector<Value> result;
471  result.reserve(this->size());
472  llvm::transform(*this, std::back_inserter(result),
473  [](OpOperand *opOperand) { return opOperand->get(); });
474  return result;
475 }
476 
477 /// Helper function that creates a memref::DimOp or tensor::DimOp depending on
478 /// the type of `source`.
480  int64_t dim) {
481  if (source.getType().isa<UnrankedMemRefType, MemRefType>())
482  return b.createOrFold<memref::DimOp>(loc, source, dim);
483  if (source.getType().isa<UnrankedTensorType, RankedTensorType>())
484  return b.createOrFold<tensor::DimOp>(loc, source, dim);
485  llvm_unreachable("Expected MemRefType or TensorType");
486 }
487 
488 SmallVector<Value, 4> LinalgOp::createFlatListOfOperandDims(OpBuilder &b,
489  Location loc) {
491  for (OpOperand *opOperand : getInputAndOutputOperands()) {
492  for (int64_t i = 0, e = getRank(opOperand); i < e; ++i)
493  res.push_back(createOrFoldDimOp(b, loc, opOperand->get(), i));
494  }
495  return res;
496 }
497 
498 SmallVector<int64_t, 4> LinalgOp::createFlatListOfOperandStaticDims() {
500  assert(!hasDynamicShape() && "expected operands to have static shapes");
501  for (OpOperand *opOperand : getInputAndOutputOperands())
502  llvm::append_range(res, getShape(opOperand));
503  return res;
504 }
505 
506 SmallVector<Range, 4> LinalgOp::createLoopRanges(OpBuilder &b, Location loc) {
507  AffineMap map = getLoopsToShapesMap();
508  unsigned numDims = map.getNumDims(), numRes = map.getNumResults();
509  auto viewSizes = createFlatListOfOperandDims(b, loc);
510  SmallVector<Range, 4> res(numDims);
511  Value zeroVal = b.create<arith::ConstantIndexOp>(loc, 0);
512  Value oneVal = b.create<arith::ConstantIndexOp>(loc, 1);
513  for (unsigned idx = 0; idx < numRes; ++idx) {
514  auto result = map.getResult(idx);
515  if (auto d = result.dyn_cast<AffineDimExpr>()) {
516  if (res[d.getPosition()].offset)
517  continue;
518  res[d.getPosition()] = Range{zeroVal, viewSizes[idx], oneVal};
519  }
520  }
521  return res;
522 }
523 
524 SmallVector<int64_t, 4> LinalgOp::computeStaticLoopSizes() {
525  AffineMap map = getLoopsToShapesMap();
526  unsigned numDims = map.getNumDims(), numRes = map.getNumResults();
527  SmallVector<int64_t, 4> allShapeSizes = createFlatListOfOperandStaticDims();
528  SmallVector<int64_t, 4> res(numDims, 0);
529  for (unsigned idx = 0; idx < numRes; ++idx) {
530  auto result = map.getResult(idx);
531  if (auto d = result.dyn_cast<AffineDimExpr>())
532  res[d.getPosition()] = allShapeSizes[idx];
533  }
534  return res;
535 }
536 
537 /// Visitor to check if any of the given set of positions from AffineDimExprs
538 /// are used within an AffineExpr.
540  : public AffineExprVisitor<HasAffineDimExprVisitor, bool> {
541  HasAffineDimExprVisitor(llvm::SmallBitVector positions)
542  : positions(std::move(positions)) {}
543 
545  return visit(binaryOpExpr.getLHS()) || visit(binaryOpExpr.getRHS());
546  }
547 
548  bool visitDimExpr(AffineDimExpr dimExpr) {
549  return positions.test(dimExpr.getPosition());
550  }
551 
552  bool visitConstantExpr(AffineConstantExpr constExpr) { return false; }
553 
554  bool visitSymbolExpr(AffineSymbolExpr symbolExpr) { return false; }
555 
556 private:
557  llvm::SmallBitVector positions;
558 };
559 
// NOTE(review): the result-type line (`LogicalResult`) preceding this
// definition was lost in extraction; the code below is otherwise unchanged.
LinalgOp::reifyResultShapes(OpBuilder &b,
                            ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  // An example that helps understand the logic below.
  // Consider the following expression O(i+j, j) += A(i,k) * B(k, j)
  // We want to express the shape of dim 0 of O in terms of shape of the inputs.
  // This is achieved as follows.
  // loopsToShapesMap = (d0, d1, d2) -> (d0, d2, d2, d1, d0 + d1, d1)
  // subMapOfResultShapes = (d0, d1, d2) -> (d0 + d1, d1)
  // shapesToLoopsMap = (d0, d2, d2, d3, d4, d5) -> (d0, d3, d2)
  // resultShapesFromInputShapes = subMapOfResultDim.compose(shapesToLoopMap)
  // = (d0, d1, d2, d3, d4, d5) -> (d0 + d1, d1)
  AffineMap loopsToShapesMap = getLoopsToShapesMap();

  // Find the position in the above map that represents the shape of the
  // result:dim being inferred.
  auto resultShapesSubMapPos = getResultsPositionInLoopsToShapeMap();

  /// From loopsToShapesMap extract the submap that represents the shape of the
  /// (resultIdx, dim) needed.
  AffineMap loopToResultsShapeMap = loopsToShapesMap.getSliceMap(
      resultShapesSubMapPos.first,
      resultShapesSubMapPos.second - resultShapesSubMapPos.first);
  AffineMap resultShapesFromInputShapesMap =
      loopToResultsShapeMap.compose(getShapesToLoopsMap());

  // Check that the result dim map does not contain the positions corresponding
  // to the outputs.
  llvm::SmallBitVector outputDims(resultShapesFromInputShapesMap.getNumDims());
  outputDims.set(resultShapesSubMapPos.first, resultShapesSubMapPos.second);
  HasAffineDimExprVisitor checkDimExpr(std::move(outputDims));
  Location loc = getOperation()->getLoc();
  auto allResultDimValues =
      applyMapToValues(b, loc, resultShapesFromInputShapesMap,
                       createFlatListOfOperandDims(b, loc));
  // `pos` walks the flattened list of result-shape expressions in lockstep
  // with the (output operand, dimension) pairs iterated below.
  int64_t pos = 0;
  ArrayRef<AffineExpr> shapeExprs = resultShapesFromInputShapesMap.getResults();
  for (OpOperand *opOperand : getOutputOperands()) {
    SmallVector<Value> shapes;
    for (int64_t dim : llvm::seq<int64_t>(0, getRank(opOperand))) {
      // When the shape expression still refers to an output dimension it
      // cannot be computed from the inputs alone; fall back to emitting a
      // dim op on the output operand itself.
      if (checkDimExpr.visit(shapeExprs[pos]))
        shapes.push_back(createOrFoldDimOp(b, loc, opOperand->get(), dim));
      else
        shapes.push_back(allResultDimValues[pos]);
      pos++;
    }
    reifiedReturnShapes.emplace_back(std::move(shapes));
  }
  return success();
}
610 
// NOTE(review): the signature line of this verifier function was lost in
// extraction; the body below (through the matching `}`) is otherwise
// unchanged. It checks the structural invariants every LinalgOp must satisfy.
  LinalgOp linalgOp = cast<LinalgOp>(op);
  // Expect at least one output operand.
  // This means an op that constructs a tensor out of indices cannot be a
  // LinalgOp at the moment. For now this will have to be a special op until we
  // have output shape operands that are not tensors.
  int64_t numInputs = linalgOp.getNumInputs();
  int64_t numOutputs = linalgOp.getNumOutputs();
  if (numOutputs == 0)
    return op->emitOpError("expected at least one output operand");
  if (failed(OpTrait::impl::verifyNOperands(op, numInputs + numOutputs)))
    return failure();
  // Verify the number of results matches the number of output tensors.
  if (op->getNumResults() != linalgOp.getOutputTensorOperands().size())
    return op->emitOpError("expected the number of results (")
           << op->getNumResults()
           << ") to be equal to the number of output tensors ("
           << linalgOp.getOutputTensorOperands().size() << ")";

  // Check all iterator types are known.
  auto iteratorTypesRange =
      linalgOp.iterator_types().getAsValueRange<StringAttr>();
  for (StringRef iteratorType : iteratorTypesRange) {
    if (!llvm::is_contained(getAllIteratorTypeNames(), iteratorType))
      return op->emitOpError("unexpected iterator_type (")
             << iteratorType << ")";
  }

  // Before checking indexing maps, we need to make sure the attributes
  // referenced by it are valid.
  if (linalgOp.hasDynamicIndexingMaps())
    if (failed(linalgOp.verifyIndexingMapRequiredAttributes()))
      return failure();

  // All input/output operands must be indexed.
  if (static_cast<int64_t>(linalgOp.indexing_maps().size()) !=
      linalgOp.getNumInputsAndOutputs())
    return op->emitOpError("expected the number of indexing_map (")
           << linalgOp.indexing_maps().size()
           << ") to be equal to the number of input/output operands ("
           << linalgOp.getNumInputsAndOutputs() << ")";

  // Per-operand checks on the tied indexing map.
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);

    // Symbols disallowed.
    if (indexingMap.getNumSymbols() != 0)
      return op->emitOpError("unexpected symbols in indexing_map #")
             << opOperand->getOperandNumber();

    // Domain must be consistent.
    unsigned numLoops = linalgOp.getNumLoops();
    if (indexingMap.getNumDims() != numLoops)
      return op->emitOpError("expected indexing_map #")
             << opOperand->getOperandNumber() << " to have " << numLoops
             << " dim(s) to match the number of loops";

    // Codomain rank must match the operand's rank.
    int64_t rank = linalgOp.getRank(opOperand);
    if (indexingMap.getNumResults() != rank)
      return op->emitOpError("expected operand rank (")
             << rank << ") to match the result rank of indexing_map #"
             << opOperand->getOperandNumber() << " ("
             << indexingMap.getNumResults() << ")";
  }

  SmallVector<unsigned> redDims;
  linalgOp.getReductionDims(redDims);

  // Simplifying assumption: either full tensor or full buffer mode.
  // This allows simpler verification of output operands vs result types
  // without premature tracking of which operand is what in mixed-mode.
  // TODO: relax when mixed-mode needs to pass verification.
  if (!linalgOp.getOutputBufferOperands().empty() &&
      !linalgOp.getOutputTensorOperands().empty())
    return op->emitOpError(
        "expected output operands to all have tensor type or "
        "all have buffer type");

  // Each output tensor operand must type-match its tied op result.
  for (OpOperand *opOperand : linalgOp.getOutputTensorOperands()) {
    OpResult result = linalgOp.getTiedOpResult(opOperand);
    if (result.getType() != opOperand->get().getType())
      return op->emitOpError("expected type of operand #")
             << opOperand->getOperandNumber() << " ("
             << opOperand->get().getType() << ")"
             << " to match type of corresponding result (" << result.getType()
             << ")";
  }

  // Output tensor indexing map may not depend on reduction indices.
  for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
    AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
    for (AffineExpr expr : indexingMap.getResults()) {
      for (unsigned pos : redDims) {
        if (expr.isFunctionOfDim(pos)) {
          // Render the offending expression for the diagnostic.
          std::string exprStr;
          {
            llvm::raw_string_ostream os(exprStr);
            os << expr;
          }
          return op->emitOpError(
                     "unexpected output tensor expression in indexing map #")
                 << (opOperand->getOperandNumber() - linalgOp.getNumInputs())
                 << " a.k.a '" << exprStr
                 << "' is function of reduction iterator 'd" << pos << "'";
        }
      }
    }
  }

  // Check the region has exactly one block.
  if (linalgOp->getNumRegions() != 1 ||
      !llvm::hasSingleElement(linalgOp->getRegion(0)))
    return op->emitOpError("expects to have 1 region with 1 block");

  if (!linalgOp.getShapesToLoopsMap())
    return op->emitOpError("expected the shape-to-loops map to be non-null");

  // Simplifying assumption: bbargs match 1-1 with shape operands elemental
  // types.
  // TODO: once ranked shape types are plugged in, we may want to drop the
  // corresponding bbargs, that can never be read from. This will be subject to
  // consistency discussions (i.e. what to do with output tensors whose bbarg is
  // not used).
  Block &block = linalgOp->getRegion(0).front();

  if (linalgOp.getNumInputsAndOutputs() != block.getNumArguments())
    return op->emitOpError("expected as many non-induction variable region "
                           "arguments as the number of input/output operands");

  // bbarg #i must carry the element (or self) type of operand #i.
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    Type elementType = getElementTypeOrSelf(opOperand->get());
    Type argType = block.getArgument(opOperand->getOperandNumber()).getType();
    if (elementType != argType)
      return op->emitOpError("expected type of bb argument #")
             << opOperand->getOperandNumber() << " (" << argType << ")"
             << " to match element or self type of the corresponding operand ("
             << elementType << ")";
  }

  // Check if given shapes match to inferred shapes.
  SmallVector<int64_t, 4> endLoopRangeValues = linalgOp.getStaticLoopRanges();
  SmallVector<int64_t, 4> startLoopRangeValues(endLoopRangeValues.size(), 0);

  // Verify only static cases since we can't get exact dimension sizes and loop
  // ranges for dynamic cases in this stage.
  if (llvm::none_of(endLoopRangeValues, ShapedType::isDynamic)) {
    // Switch to inclusive upper bounds before composing with indexing maps.
    for (int64_t &range : endLoopRangeValues)
      range -= 1;
    for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
      AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
      SmallVector<int64_t, 4> startIndices =
          indexingMap.compose(startLoopRangeValues);
      SmallVector<int64_t, 4> endIndices =
          indexingMap.compose(endLoopRangeValues);
      ArrayRef<int64_t> shape = linalgOp.getShape(opOperand);
      for (auto dim : llvm::seq<int64_t>(0, shape.size())) {
        // Ignore dynamic dimension or the case that the dimension size is 0
        if (ShapedType::isDynamic(shape[dim]) || shape[dim] == 0)
          continue;

        // The first index or last index should be the maximum or the minimum in
        // the inferred index ranges since the range is increasing or
        // decreasing. The size of dimensions of input/output operands and the
        // maximum value + 1 in the inferred range should be the same. But, for
        // now we check if the inferred ranges are in boundary of input/output
        // operands' size or not in case that Affine Expressions are complicated
        // such as d0 * 3
        // + d1 since it is not easy to handle the issues.
        // Found the case that this solution can't check, for example, (d0, d1)
        // -> (d1 - d0)
        int64_t inferredDimSize =
            std::max(startIndices[dim], endIndices[dim]) + 1;
        if (std::min(startIndices[dim], endIndices[dim]) < 0) {
          // Render the offending map for the diagnostic.
          std::string mapStr;
          {
            llvm::raw_string_ostream os(mapStr);
            os << indexingMap;
          }
          return op->emitOpError(
                     "unexpected result less than 0 at expression #")
                 << dim << " in " << mapStr;
        }
        if (indexingMap.getResult(dim).dyn_cast<AffineDimExpr>()) {
          // Plain dim expr: the inferred size must match exactly.
          if (inferredDimSize != shape[dim]) {
            return op->emitOpError("inferred input/output operand #")
                   << opOperand->getOperandNumber()
                   << " has shape's dimension #" << dim << " to be "
                   << inferredDimSize << ", but found " << shape[dim];
          }
        } else {
          // Compound expr: only require the operand to be large enough.
          if (inferredDimSize > shape[dim]) {
            return op->emitOpError("inferred input/output operand #")
                   << opOperand->getOperandNumber()
                   << " has shape's dimension #" << dim
                   << " to be greater than or equal to " << inferredDimSize
                   << ", but found " << shape[dim];
          }
        }
      }
    }
  }

  return success();
}
Affine binary operation expression.
Definition: AffineExpr.h:207
TODO: Remove this file when SCCP and integer range analysis have been ported to the new framework...
ArrayRef< StringRef > getAllIteratorTypeNames()
Use to encode that a particular iterator type has window semantics.
AffineMap inversePermutation(AffineMap map)
Returns a map of codomain to domain dimensions such that the first codomain dimension for a particula...
Definition: AffineMap.cpp:658
constexpr StringRef getParallelIteratorTypeName()
Use to encode that a particular iterator type has parallel semantics.
void createOrFold(SmallVectorImpl< Value > &results, Location location, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
Definition: Builders.h:458
MatchFillResult
AffineMap compose(AffineMap map) const
Returns the AffineMap resulting from composing this with map.
Definition: AffineMap.cpp:439
Operation is a basic unit of execution within MLIR.
Definition: Operation.h:28
bool isaContractionOpInterface(LinalgOp linalgOp)
Checks whether linalgOp conforms to ContractionOpInterface.
unsigned getNumSymbols() const
Definition: AffineMap.cpp:298
unsigned getNumDims() const
Definition: AffineMap.cpp:294
This is a value defined by a result of an operation.
Definition: Value.h:425
Block represents an ordered list of Operations.
Definition: Block.h:29
bool visitDimExpr(AffineDimExpr dimExpr)
Value getOperand(unsigned idx)
Definition: Operation.h:274
LogicalResult verifyNOperands(Operation *op, unsigned numOperands)
Definition: Operation.cpp:701
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value...
Definition: LogicalResult.h:72
unsigned getNumOperands()
Definition: Operation.h:270
static T getAffineExprOfType(AffineExpr lhs, AffineExpr rhs)
Of the given two expressions returns one that is of type T (lhs gets preference over rhs) ...
static llvm::SmallDenseSet< unsigned > getPreservedDims(AffineMap map)
bool succeeded(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a success value...
Definition: LogicalResult.h:68
bool visitConstantExpr(AffineConstantExpr constExpr)
Operation & front()
Definition: Block.h:144
LogicalResult verifyFillInterface(Operation *op)
Verify that op conforms to the FillOpInterface.
unsigned getPosition() const
Definition: AffineExpr.cpp:312
static ArrayRef< int64_t > getShape(Type type)
Returns the shape of the given type.
Definition: Traits.cpp:117
BlockArgument getArgument(unsigned i)
Definition: Block.h:120
An integer constant appearing in affine expression.
Definition: AffineExpr.h:232
static constexpr const bool value
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:48
Auxiliary range data structure to unpack the offset, size and stride operands into a list of triples...
AffineExpr getRHS() const
Definition: AffineExpr.cpp:307
AffineExpr getResult(unsigned idx) const
Definition: AffineMap.cpp:311
Base class for AffineExpr visitors/walkers.
bool visitAffineBinaryOpExpr(AffineBinaryOpExpr binaryOpExpr)
U dyn_cast() const
Definition: AffineExpr.h:281
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:56
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:380
This class represents an efficient way to signal success or failure.
Definition: LogicalResult.h:26
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:62
AffineMap getSliceMap(unsigned start, unsigned length) const
Returns the map consisting of length expressions starting from start.
Definition: AffineMap.cpp:522
Type getElementTypeOrSelf(Type type)
Return the element type or return the type itself.
MatchContractionResult
AffineExpr getLHS() const
Definition: AffineExpr.cpp:304
MatchConvolutionResult
bool isProjectedPermutation(bool allowZeroInResults=false) const
Returns true if the AffineMap represents a subset (i.e.
Definition: AffineMap.cpp:478
unsigned getNumArguments()
Definition: Block.h:119
AffineMap concatAffineMaps(ArrayRef< AffineMap > maps)
Concatenates a list of maps into a single AffineMap, stepping over potentially empty maps...
Definition: AffineMap.cpp:703
Base type for affine expression.
Definition: AffineExpr.h:68
RHS of mul is always a constant or a symbolic expression.
SmallVector< Value, 4 > applyMapToValues(OpBuilder &b, Location loc, AffineMap map, ValueRange values)
Returns the values obtained by applying map to the list of values.
Definition: AffineOps.cpp:736
static WalkResult advance()
Definition: Visitors.h:51
unsigned getNumResults() const
Definition: AffineMap.cpp:302
IRValueT get() const
Return the current value being used by this operand.
Definition: UseDefLists.h:133
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued...
Definition: AffineMap.h:41
U cast() const
Definition: AffineExpr.h:291
static WalkResult interrupt()
Definition: Visitors.h:50
LogicalResult verifyStructuredOpInterface(Operation *op)
Verify that op conforms to the invariants of StructuredOpInterface.
RetT walk(FnT &&callback)
Walk the operations in this block.
Definition: Block.h:273
static bool isAddMul(Block &block)
Detect whether res is any permutation of u5(u1(c) + u2(u3(a) * u4(b))) on the field (AddOpType...
ArrayRef< AffineExpr > getResults() const
Definition: AffineMap.cpp:307
static MatchContractionResult isContractionInterfaceImpl(Operation *op)
static MatchConvolutionResult isConvolutionInterfaceImpl(Operation *op)
Visitor to check if any of the given set of positions from AffineDimExprs are used within an AffineEx...
bool isa() const
Definition: AffineExpr.h:270
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:72
bool visitSymbolExpr(AffineSymbolExpr symbolExpr)
static Value min(ImplicitLocOpBuilder &builder, Value value, Value bound)
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:85
RetTy visit(AffineExpr expr)
Operation * getTerminator()
Get the terminator operation of this block.
Definition: Block.cpp:230
AffineExprKind getKind() const
Return the classification for this type.
Definition: AffineExpr.cpp:25
static MatchFillResult isFillInterfaceImpl(Operation *op)
Type getType() const
Return the type of this value.
Definition: Value.h:118
A dimensional identifier appearing in an affine expression.
Definition: AffineExpr.h:216
Specialization of arith.constant op that returns an integer of index type.
Definition: Arithmetic.h:80
LogicalResult verifyConvolutionInterface(Operation *op)
Verify that op conforms to the ConvolutionOpInterface.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:20
This class represents an operand of an operation.
Definition: Value.h:251
static bool isChainOfUnaryOpsFrom(Value v, Value from)
Return true if the use-def chain from v to from consists of 0 or more unary single-operand operations...
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:328
LogicalResult verifyContractionInterface(Operation *op)
Verify that op conforms to ContractionOpInterface.
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "&#39;dim&#39; op " which is convenient for verifiers...
Definition: Operation.cpp:518
bool isa() const
Definition: Types.h:246
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim)
Helper function that creates a memref::DimOp or tensor::DimOp depending on the type of source...
Definition: Utils.cpp:195
constexpr StringRef getReductionIteratorTypeName()
Use to encode that a particular iterator type has reduction semantics.
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Definition: Operation.cpp:231
static void visit(Operation *op, DenseSet< Operation *> &visited)
Visits all the pdl.operand(s), pdl.result(s), and pdl.operation(s) connected to the given operation...
Definition: PDL.cpp:62
This class helps build Operations.
Definition: Builders.h:184
HasAffineDimExprVisitor(llvm::SmallBitVector positions)
bool canOpOperandsBeDroppedImpl(linalg::LinalgOp linalgOp, ArrayRef< OpOperand *> droppedOperands)
Implementation of the method that checks if the given operands can be dropped, i.e.
static Value max(ImplicitLocOpBuilder &builder, Value value, Value bound)
A symbolic identifier appearing in an affine expression.
Definition: AffineExpr.h:224
static OpType getSingleOpOfType(Block &block)
Return the unique instance of OpType in block if it is indeed unique.