MLIR 22.0.0git
LoopEmitter.cpp
1//===- LoopEmitter.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "LoopEmitter.h"
10#include "CodegenUtils.h"
11
19
20using namespace mlir;
21using namespace mlir::sparse_tensor;
22
23//===----------------------------------------------------------------------===//
24// File local shorthand macros
25//===----------------------------------------------------------------------===//
26
27#define CMPI(p, l, r) \
28 (arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::p, (l), (r)) \
29 .getResult())
30
31#define C_IDX(v) (constantIndex(builder, loc, (v)))
32#define YIELD(vs) (scf::YieldOp::create(builder, loc, (vs)))
33#define ADDI(lhs, rhs) (arith::AddIOp::create(builder, loc, (lhs), (rhs)))
34#define ANDI(lhs, rhs) (arith::AndIOp::create(builder, loc, (lhs), (rhs)))
35#define SUBI(lhs, rhs) (arith::SubIOp::create(builder, loc, (lhs), (rhs)))
36#define MULI(lhs, rhs) (arith::MulIOp::create(builder, loc, (lhs), (rhs)))
37#define REMUI(lhs, rhs) (arith::RemUIOp::create(builder, loc, (lhs), (rhs)))
38#define DIVUI(lhs, rhs) (arith::DivUIOp::create(builder, loc, (lhs), (rhs)))
39#define SELECT(c, l, r) (arith::SelectOp::create(builder, loc, (c), (l), (r)))
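// A minimal usage sketch (names are illustrative): with an OpBuilder `builder`
// and a Location `loc` in scope, CMPI(ult, a, b) expands to
//   arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult, (a), (b))
//       .getResult()
// i.e. the macros are plain shorthands over the usual op-creation calls and
// rely on `builder`/`loc` being visible at the expansion site.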
40
41//===----------------------------------------------------------------------===//
42// Debugging utils
43//===----------------------------------------------------------------------===//
44
45#ifndef NDEBUG
46[[maybe_unused]] static void dumpIndexMemRef(OpBuilder &builder, Location loc,
47 Value memref) {
48 memref = memref::CastOp::create(
49 builder, loc, UnrankedMemRefType::get(builder.getIndexType(), 0), memref);
50 createFuncCall(builder, loc, "printMemrefInd", TypeRange{},
51 ValueRange{memref}, EmitCInterface::On);
52}
53#endif
54
55//===----------------------------------------------------------------------===//
56// File local helper functions.
57//===----------------------------------------------------------------------===//
58
59// For index reduction loops, since the tensors are sliced into non-contiguous
60// fragments, we need a triple [pLo, pHi, pPtr], in which the pair (pLo, pHi)
61// specifies the range of the fragment, and pPtr specifies the index of the
62// corresponding fragment in the child level (i.e., a pointer to the sliced
63// position array).
64static Value genSliceOffset(OpBuilder &builder, Location loc, Value tensor,
65 Level lvl) {
66 auto enc = getSparseTensorEncoding(tensor.getType());
67 return createOrFoldSliceOffsetOp(builder, loc, tensor, toDim(enc, lvl));
68}
69
70static Value genSliceStride(OpBuilder &builder, Location loc, Value tensor,
71 Level lvl) {
72 auto enc = getSparseTensorEncoding(tensor.getType());
73 return createOrFoldSliceStrideOp(builder, loc, tensor, toDim(enc, lvl));
74}
75
76static bool isIntOrFPZero(Attribute attr) {
77 if (auto f = llvm::dyn_cast<FloatAttr>(attr); f && f.getValue().isZero())
78 return true;
79 if (auto i = llvm::dyn_cast<IntegerAttr>(attr); i && i.getValue().isZero())
80 return true;
81 return false;
82}
83
84static Value unFoldOpIntResult(OpBuilder &builder, Location loc,
85 OpFoldResult ofr) {
86 if (std::optional<int64_t> i = getConstantIntValue(ofr); i.has_value())
87 return constantIndex(builder, loc, *i);
88 return cast<Value>(ofr);
89}
90
91static Value tryFoldTensors(Value t) {
92 // TODO: this should be done through a folding pass after switching to
93 // `sparse_tensor.iterate`-based sparsification.
94 auto stt = tryGetSparseTensorType(t);
95 auto padOp = t.getDefiningOp<tensor::PadOp>();
96 if (padOp && stt.has_value() && stt->hasEncoding() &&
97 padOp.getSourceType().getEncoding() == stt->getEncoding() &&
98 stt->getEncoding().isIdentity()) {
99 // Try fusing padOp with zeros.
100 Attribute padCst;
101 if (matchPattern(padOp.getBody()->getTerminator(),
102 m_Op<tensor::YieldOp>(matchers::m_Any(&padCst))) &&
103 isIntOrFPZero(padCst)) {
104 return padOp.getSource();
105 }
106 }
107 return t;
108}
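// Roughly, the fold above rewrites a use of
//
//   %padded = tensor.pad %src low[...] high[...] {
//     ^bb0(...):
//       tensor.yield %zero : f32
//   } : tensor<?x?xf32, #enc> to tensor<?x?xf32, #enc>
//
// into a direct use of %src, provided the yielded pad value is a constant
// integer/float zero and #enc is an identity sparse encoding shared by source
// and result (the types and names in this sketch are only illustrative). The
// padding itself is reintroduced later by makeLevelIterator via a padded
// iterator.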
109
110//===----------------------------------------------------------------------===//
111// Sparse tensor loop emitter class implementations
112//===----------------------------------------------------------------------===//
113
114LoopEmitter::LoopEmitter(ValueRange tensors, StringAttr loopTag, bool hasOutput,
115 bool isSparseOut, unsigned numLoops,
116 DependentLvlGetter dimGetter,
117 SparseEmitStrategy emitStrategy) {
118 initialize(tensors, loopTag, hasOutput, isSparseOut, numLoops, dimGetter, emitStrategy);
119}
120
121void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
122 bool isSparseOut, unsigned numLoops,
123 DependentLvlGetter dimGetter,
124 SparseEmitStrategy emitStrategy) {
125 // First initialize the top-level type of the fields.
126 this->loopTag = loopTag;
127 this->hasOutput = hasOutput;
128 this->isSparseOut = isSparseOut;
129 this->emitStrategy = emitStrategy;
130
131 const unsigned numManifestTensors = ts.size();
132 const unsigned synTensorId = numManifestTensors;
133 const unsigned numTensors = numManifestTensors + 1;
134 // tensors array (len == numManifestTensor).
135 this->tensors.assign(ts.begin(), ts.end());
136 // Arrays with len == numTensor.
137 this->valBuffer.assign(numTensors, nullptr);
138 this->lvls.resize(numTensors);
139 this->iters.resize(numTensors);
140 this->spIterVals.resize(numTensors);
141
142 // These zeros will be overwritten below, but we need to initialize
143 // them to something since we'll need random-access assignment.
144 this->loopStack.reserve(numLoops);
145 this->loopSeqStack.reserve(numLoops);
146
147 // Index-reduction related fields.
148 this->dependentLvlMap.assign(
149 numTensors, std::vector<std::vector<std::pair<TensorLevel, unsigned>>>());
150 this->sliceMeta.assign(
151 numTensors, std::vector<std::vector<std::pair<Value, unsigned>>>());
152 this->levelReducedDep.assign(numTensors, std::vector<unsigned>());
153
154 // Initialize nested types of `TensorId`-indexed fields.
155 for (TensorId tid = 0; tid < numTensors; tid++) {
156 Level lvlRank;
157 if (tid == synTensorId) {
158 // The synthetic tensor is (conceptually) an all-dense tensor with rank
159 // equal to the total number of loops (each level can potentially be mapped
160 // to one of the loops being generated).
161 lvlRank = numLoops;
162 } else {
163 const Value t = tensors[tid];
164 // A scalar or zero-dimension tensor.
165 if (isZeroRankedTensorOrScalar(t.getType()))
166 continue;
167
168 auto rtp = getRankedTensorType(t);
169 const SparseTensorType stt(rtp);
170 lvlRank = stt.getLvlRank();
171 }
172
173 lvls[tid].resize(lvlRank);
174 iters[tid].resize(lvlRank);
175 spIterVals[tid].resize(lvlRank);
176 loopHighs.assign(numLoops, nullptr);
177
178 // Slice-driven loops related initialization.
179 levelReducedDep[tid].assign(lvlRank, 0);
180 dependentLvlMap[tid].assign(
181 lvlRank, std::vector<std::pair<TensorLevel, unsigned>>());
182 sliceMeta[tid].assign(lvlRank, std::vector<std::pair<Value, unsigned>>());
183 if (dimGetter && !isSynTensor(tid)) {
184 for (Level l = 0; l < lvlRank; l++) {
185 std::vector<std::pair<LoopId, unsigned>> deps = dimGetter(tid, l);
186 // Sort the dependences by loop order.
187 llvm::sort(deps, llvm::less_first());
188
189 dependentLvlMap[tid][l] = std::move(deps);
190 unsigned depends = dependentLvlMap[tid][l].size();
191 if (depends == 0)
192 continue;
193 sliceMeta[tid][l].reserve(depends);
194 }
195 }
196 }
197}
198
199std::unique_ptr<SparseIterator>
200LoopEmitter::makeLevelIterator(OpBuilder &builder, Location loc, TensorId t,
201 Level l) {
202 Value tensor = tensors[t];
203 auto stt = getSparseTensorType(tensor);
204 auto it = makeSimpleIterator(*lvls[t][l], emitStrategy);
205
206 Value folded = tryFoldTensors(tensor);
207 if (folded != tensor) {
208 auto padOp = tensor.getDefiningOp<tensor::PadOp>();
209 assert(padOp);
210 if (padOp.getPaddedDims().test(l)) {
211 Value low = unFoldOpIntResult(builder, loc, padOp.getMixedLowPad()[l]);
212 Value high = unFoldOpIntResult(builder, loc, padOp.getMixedHighPad()[l]);
213 auto padIt = makePaddedIterator(std::move(it), low, high, emitStrategy);
214 return padIt;
215 }
216 }
217
218 if (stt.hasEncoding() && stt.getEncoding().isSlice()) {
219 Value offset = genSliceOffset(builder, loc, tensor, l);
220 Value stride = genSliceStride(builder, loc, tensor, l);
221 auto slicedIt = makeSlicedLevelIterator(
222 std::move(it), offset, stride, lvls[t][l]->getSize(), emitStrategy);
223 return slicedIt;
224 }
225
226 return it;
227}
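// Conceptually, the wrappers above compose as follows (a sketch, not the exact
// class hierarchy): a padded iterator re-maps coordinates by the low/high pad
// amounts of the padded dimension, while a sliced iterator only exposes stored
// coordinates `crd` with (crd - offset) % stride == 0, presenting them as
// (crd - offset) / stride in the slice's coordinate space.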
228
229void LoopEmitter::initializeLoopEmit(
230 OpBuilder &builder, Location loc, LoopEmitter::OutputUpdater updater,
231 LoopEmitter::SynTensorBoundSetter synSetter) {
232
233 // For every manifest tensor, set up the values buffer.
234 for (TensorId t = 0, numTensors = getNumManifestTensors(); t < numTensors;
235 t++) {
236 // TODO: this should be done through a folding pass after switching to
237 // `sparse_tensor.iterate`-based sparsification.
238 const Value tensor = tryFoldTensors(tensors[t]);
239 const auto rtp = dyn_cast<RankedTensorType>(tensor.getType());
240 // Skip only scalars; zero-ranked tensors still need to be bufferized and
241 // (probably) filled with zeros by users.
242 if (!rtp)
243 continue;
244
245 auto stt = getSparseTensorType(tensor);
246 const auto shape = rtp.getShape();
247
248 // Perform the required bufferization. Dense inputs materialize from the
249 // input tensors. Sparse inputs use sparse primitives to obtain the values.
250 // Delegates extra output initialization to clients.
251 bool isOutput = isOutputTensor(t);
252 Type elementType = stt.getElementType();
253 if (!stt.hasEncoding()) {
254 // Non-annotated dense tensors.
255 BaseMemRefType denseTp = MemRefType::get(shape, elementType);
256
257 // TODO: if we unconditionally use fully dynamic layout here, it breaks
258 // some vectorization passes which require a static stride of 1.
259 // Is it possible to run the vectorization pass after bufferization?
260 if (llvm::isa_and_nonnull<tensor::ExtractSliceOp>(tensor.getDefiningOp()))
261 denseTp = bufferization::getMemRefTypeWithFullyDynamicLayout(rtp);
262
263 Value denseVal =
264 bufferization::ToBufferOp::create(builder, loc, denseTp, tensor);
265 // Dense outputs need special handling.
266 if (isOutput && updater)
267 denseVal = updater(builder, loc, denseVal, tensor);
268
269 valBuffer[t] = denseVal;
270 } else {
271 // Annotated sparse tensors.
272 // We also need the value buffer for all-dense annotated "sparse"
273 // tensors.
274 valBuffer[t] = ToValuesOp::create(builder, loc, tensor);
275 }
276 }
277
278 // The sparse iterator values will only be available after the loop is
279 // constructed.
280 if (emitStrategy == SparseEmitStrategy::kSparseIterator)
281 return;
282
283 // For every synthetic tensor, set the high bound by calling the callback.
284 if (synSetter) {
285 TensorId synId = getSynTensorId();
286 for (unsigned i = 0, e = loopHighs.size(); i < e; i++) {
287 Value sz = loopHighs[i] = synSetter(builder, loc, i);
288 auto [stl, it] = makeSynLevelAndIterator(sz, synId, i, emitStrategy);
289 lvls[synId][i] = std::move(stl);
290 iters[synId][i].emplace_back(std::move(it));
291 }
292 }
293
294 // For every manifest tensor:
295 // * For every level:
296 // * get the positions and coordinates buffers
297 // * get/compute the level-size, which is also used as the upper-bound
298 // on positions.
299 for (TensorId t = 0, numTensors = getNumManifestTensors(); t < numTensors;
300 t++) {
301 // TODO: this should be done through a folding pass after switching to
302 // `sparse_tensor.iterate`-based sparsification.
303 const Value tensor = tryFoldTensors(tensors[t]);
304 const auto rtp = dyn_cast<RankedTensorType>(tensor.getType());
305 if (!rtp)
306 // Skip only scalars; zero-ranked tensors still need to be bufferized and
307 // (probably) filled with zeros by users.
308 continue;
309
310 auto stt = getSparseTensorType(tensor);
311 const Level lvlRank = stt.getLvlRank();
312
313 // Scan all levels of current tensor.
314 for (Level l = 0; l < lvlRank; l++) {
315 // Find upper bound in current dimension.
316 lvls[t][l] = makeSparseTensorLevel(builder, loc, tensor, t, l);
317 if (!dependentLvlMap[t][l].empty())
318 continue;
319
320 auto it = makeLevelIterator(builder, loc, t, l);
321 iters[t][l].emplace_back(std::move(it));
322 }
323 // NOTE: we could also prepare for level 0 here in advance; this would hoist
324 // some loop preparation out of tensor iteration, but would also (undesirably)
325 // hoist the code outside if-conditions.
326 }
327 // TODO: avoid treating subsection iterator as a special case.
328 initSubSectIterator(builder, loc);
329}
330
331void LoopEmitter::initSubSectIterator(OpBuilder &builder, Location loc) {
332 Value c0 = C_IDX(0);
333 for (TensorId t = 0, e = tensors.size(); t < e; t++) {
334 auto rtp = dyn_cast<RankedTensorType>(tensors[t].getType());
335 if (!rtp)
336 continue;
337
338 Level lvlRank = SparseTensorType(rtp).getLvlRank();
339
340 // Compute the dependency reduction order.
341 auto remDepStack = dependentLvlMap;
342 std::vector<std::tuple<LoopId, TensorId, Level>> depRedOrder;
343 for (Level lvl = 0; lvl < lvlRank; lvl++) {
344 // Reverse queue into a stack.
345 std::reverse(remDepStack[t][lvl].begin(), remDepStack[t][lvl].end());
346 for (auto [loop, coeff] : dependentLvlMap[t][lvl])
347 depRedOrder.emplace_back(loop, t, lvl);
348 }
349
350 if (depRedOrder.empty())
351 continue;
352
353 llvm::sort(depRedOrder, llvm::less_first());
354
355 SmallVector<SparseIterator *> lastIter(tensors.size(), nullptr);
356 for (auto [loop, t, lvl] : depRedOrder) {
357 std::pair<LoopId, unsigned> curDep = remDepStack[t][lvl].back();
358 assert(curDep.first == loop);
359 remDepStack[t][lvl].pop_back();
360
361 auto lvlIt = makeLevelIterator(builder, loc, t, lvl);
362 const SparseIterator *parent = lastIter[t];
363 if (!parent && lvl > 0) {
364 if (dependentLvlMap[t][lvl - 1].empty()) {
365 parent = iters[t][lvl - 1].back().get();
366 }
367 }
368
369 std::unique_ptr<SparseIterator> it;
370 if (!remDepStack[t][lvl].empty()) {
371 // Compute the subsection size.
372 Value size = c0;
373 for (auto [loop, stride] : remDepStack[t][lvl]) {
374 Value idxMax = SUBI(loopHighs[loop], C_IDX(1));
375 size = ADDI(size, ADDI(MULI(idxMax, C_IDX(stride)), C_IDX(1)));
376 }
377 it = makeNonEmptySubSectIterator(builder, loc, parent, loopHighs[loop],
378 std::move(lvlIt), size, curDep.second,
379 emitStrategy);
380 } else {
381 const SparseIterator &subSectIter = *iters[t][lvl].back();
382 it = makeTraverseSubSectIterator(builder, loc, subSectIter, *parent,
383 std::move(lvlIt), loopHighs[loop],
384 curDep.second, emitStrategy);
385 }
386 lastIter[t] = it.get();
387 iters[t][lvl].emplace_back(std::move(it));
388 }
389 }
390}
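// For reference, the subsection size computed above follows, for each
// still-unresolved dependence (loop_i, stride_i) of the level:
//
//   size = sum_i ((loopHigh[loop_i] - 1) * stride_i + 1)
//
// which is exactly the SUBI/MULI/ADDI sequence in the loop over
// remDepStack[t][lvl], i.e. the largest coordinate span the subsection may
// cover.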
391
392void LoopEmitter::categorizeIterators(
393 ArrayRef<TensorLevel> tidLvls, SmallVectorImpl<SparseIterator *> &raIters,
394 SmallVectorImpl<SparseIterator *> &spIters) {
395 // Finds out the tensor levels that we should use to generate loops. Among all
396 // the tensor levels, there is at most one sparse tensor level.
397 for (auto [t, l] : unpackTensorLevelRange(tidLvls)) {
398 SparseIterator *it = &getCurIterator(t, l);
399 if (it->randomAccessible())
400 raIters.push_back(it);
401 else
402 spIters.push_back(it);
403 }
404
405 llvm::stable_sort(spIters, [](auto lhs, auto rhs) {
406 // AffineUnRed > Affine > Slice > Trivial
407 return static_cast<uint8_t>(lhs->kind) > static_cast<uint8_t>(rhs->kind);
408 });
409}
410
411void LoopEmitter::enterNewLoopSeq(OpBuilder &builder, Location loc,
412 ArrayRef<TensorLevel> tidLvls) {
413 // TODO: sort
414 assert(loopSeqStack.size() == loopStack.size());
415
416 if (emitStrategy != SparseEmitStrategy::kSparseIterator) {
417 // Prepares for all the tensors used in the current loop sequence.
418 for (auto [tid, lvl] : unpackTensorLevelRange(tidLvls)) {
419 levelReducedDep[tid][lvl]++;
420 prepareLoopOverTensorAtLvl(builder, loc, tid, lvl);
421 }
422 }
423
424 // Universal Index starts from 0.
425 loopSeqStack.emplace_back(C_IDX(0), tidLvls.vec());
426}
427
428void LoopEmitter::exitCurrentLoopSeq(OpBuilder &builder, Location loc) {
429 assert(loopSeqStack.size() == loopStack.size() + 1);
430
431 // Depending on whether the slice is resolved or not at the current loop
432 // sequence, end them in different ways.
433 for (auto [tid, lvl] : unpackTensorLevelRange(loopSeqStack.back().second))
434 levelReducedDep[tid][lvl]--;
435
436 loopSeqStack.pop_back();
437}
438
439Value LoopEmitter::genAffine(OpBuilder &builder, Location loc, AffineExpr a) {
440 switch (a.getKind()) {
441 case AffineExprKind::DimId: {
442 // FIXME: since the one callsite in Sparsification passes in a
443 // level-expression, the `getPosition` must in fact be a `Dimension`.
444 // However, elsewhere we have been led to expect that `loopIdToOrd`
445 // should be indexed by `LoopId`...
446 const auto loopId = cast<AffineDimExpr>(a).getPosition();
447 return loopStack[loopId].iv;
448 }
449 case AffineExprKind::Add: {
450 auto binOp = cast<AffineBinaryOpExpr>(a);
451 return ADDI(genAffine(builder, loc, binOp.getLHS()),
452 genAffine(builder, loc, binOp.getRHS()));
453 }
454 case AffineExprKind::Mul: {
455 auto binOp = cast<AffineBinaryOpExpr>(a);
456 return MULI(genAffine(builder, loc, binOp.getLHS()),
457 genAffine(builder, loc, binOp.getRHS()));
458 }
459 case AffineExprKind::Constant: {
460 int64_t c = cast<AffineConstantExpr>(a).getValue();
461 return C_IDX(c);
462 }
463 default:
464 llvm_unreachable("unexpected affine subscript");
465 }
466}
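// For example (illustrative values), lowering the affine expression d0 * 3 + d1
// with loop induction variables %iv0 and %iv1 bound to d0 and d1 emits roughly:
//
//   %c3 = arith.constant 3 : index
//   %m  = arith.muli %iv0, %c3 : index
//   %r  = arith.addi %m, %iv1 : index
//
// one arith op per Add/Mul node, with constants materialized through C_IDX.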
467
468std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
469 OpBuilder &builder, Location loc, SparseIterator &iter,
470 MutableArrayRef<Value> reduc, bool isParallel) {
471
472 // TODO: support dynamic slices.
473 // Uses the first dimension here to build the loop bound (which is also the
474 // biggest range).
475
476 Value step = C_IDX(1);
477 auto [lo, hi] = iter.genForCond(builder, loc);
478 Operation *loop = nullptr;
479 Value iv;
480 if (isParallel) {
481 scf::ParallelOp parOp =
482 scf::ParallelOp::create(builder, loc, lo, hi, step, reduc);
483 builder.setInsertionPointToStart(parOp.getBody());
484 assert(parOp.getNumReductions() == reduc.size());
485 iv = parOp.getInductionVars()[0];
486
487 // In-place update on the reduction variable vector.
488 // Note that the init vals are not the actual reduction variables but are
489 // instead used as a "special handle" to (temporarily) represent them. The
490 // expression on the init vals will be moved into scf.reduce and replaced
491 // with the block arguments when exiting the loop (see exitForLoop). This is
492 // needed as we cannot build the actual reduction block and get the actual
493 // reduction variable before users fill the parallel loop body.
494 for (int i = 0, e = reduc.size(); i < e; i++)
495 reduc[i] = parOp.getInitVals()[i];
496 loop = parOp;
497 } else {
498 scf::ForOp forOp = scf::ForOp::create(builder, loc, lo, hi, step, reduc);
499 builder.setInsertionPointToStart(forOp.getBody());
500 iv = forOp.getInductionVar();
501
502 // In-place update on the reduction variable vector.
503 assert(forOp.getNumRegionIterArgs() == reduc.size());
504 for (int i = 0, e = reduc.size(); i < e; i++)
505 reduc[i] = forOp.getRegionIterArg(i);
506 loop = forOp;
507 }
508 assert(loop && iv);
509
510 Value crd = iv;
511 if (!iter.randomAccessible()) {
512 iter.linkNewScope(iv);
513 crd = iter.deref(builder, loc);
514 } else {
515 iter.locate(builder, loc, iv);
516 }
517
518 return {loop, crd};
519}
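// As a sketch (exact IR depends on the level type), the sequential path above
// produces something like:
//
//   scf.for %p = %lo to %hi step %c1 iter_args(%red = %init) -> (...) {
//     // Non-random-access level: %p is a position, and deref() loads the
//     // coordinate from the level's coordinate storage before the user body.
//     ...
//   }
//
// For a random-access (e.g. dense) level the induction variable is already the
// coordinate, and the iterator is merely located at %p.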
520
521std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
522 OpBuilder &builder, Location loc, ArrayRef<SparseIterator *> spIters,
523 MutableArrayRef<Value> reduc, bool needsUniv) {
524 return genCoIteration(builder, loc, spIters, reduc,
525 needsUniv ? loopSeqStack.back().first : nullptr);
526}
527
528bool LoopEmitter::shouldIteratedByForLoop(ArrayRef<SparseIterator *> spIters) {
529 // If we need to co-iterate over two or more sparse tensors, we need a while loop.
530 if (spIters.size() > 1)
531 return false;
532
533 if (spIters.size() == 1)
534 return spIters.front()->iteratableByFor();
535
536 return true;
537}
538
539Region *LoopEmitter::enterCurrentCoIterationCase(OpBuilder &builder,
540 Location loc,
541 I64BitSet caseBit,
542 unsigned caseIdx,
543 MutableArrayRef<Value> reduc) {
544 auto coIterOp = cast<CoIterateOp>(loopStack.back().loop);
545 SmallVector<Attribute> cases(coIterOp.getCases().getAsRange<Attribute>());
546 cases[caseIdx] = builder.getI64IntegerAttr(caseBit);
547
548 coIterOp.setCasesAttr(builder.getArrayAttr(cases));
549 Region &caseRegion = coIterOp.getRegion(caseIdx);
550 assert(caseRegion.getBlocks().empty() &&
551 "re-initialize the same coiteration case region.");
552
553 // Each block starts with a list of user-provided iteration arguments.
554 TypeRange iterArgsTps = coIterOp.getInitArgs().getTypes();
555 // Followed by a list of used coordinates of index type.
556 SmallVector<Type> blockArgTps(coIterOp.getCrdUsedLvls().count(),
557 builder.getIndexType());
558
559 blockArgTps.append(iterArgsTps.begin(), iterArgsTps.end());
560 // Ends with a set of iterators that define the actual iteration space.
561 for (auto i : caseBit.bits()) {
562 blockArgTps.push_back(
563 cast<IterSpaceType>(coIterOp.getIterSpaces()[i].getType())
564 .getIteratorType());
565 }
566 SmallVector<Location> locs(blockArgTps.size(), loc);
567 caseRegion.emplaceBlock().addArguments(blockArgTps, locs);
568
569 // Entering the new region scope, updating the SSA chain.
570 builder.setInsertionPointToStart(&caseRegion.front());
571 // Update the coordinates.
572 loopStack.back().iv = coIterOp.getCrds(caseIdx).front();
573 // Updates loop iteration arguments.
574 ValueRange iterArgs = coIterOp.getRegionIterArgs(caseIdx);
575 llvm::copy(iterArgs, reduc.begin());
576 // Updates sparse iterator values.
577 ValueRange iters = coIterOp.getRegionIterators(caseIdx);
578 ArrayRef<TensorLevel> tidLvls = loopStack.back().tidLvls;
579 for (auto [i, tl] : llvm::enumerate(unpackTensorLevelRange(tidLvls))) {
580 if (caseBit[i]) {
581 spIterVals[tl.first][tl.second] = iters.front();
582 iters = iters.drop_front();
583 } else {
584 spIterVals[tl.first][tl.second] = nullptr;
585 }
586 }
587 // Must have consumed all iterator SSA values.
588 assert(iters.empty());
589 return &caseRegion;
590}
591
592Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
593 OpBuilder &builder, Location loc, ArrayRef<TensorLevel> tidLvls,
594 unsigned numCases, MutableArrayRef<Value> reduc, bool tryParallel,
595 bool needsUniv) {
596 // TODO: Argument `numCases` only used when generating iterator-based sparse
597 // loops. Simplify the code upon feature complete.
598 // TODO: handle coiteration with sparse iterator.
599 if (emitStrategy == SparseEmitStrategy::kSparseIterator) {
600 if (tidLvls.size() == 1) {
601 auto [tid, lvl] = unpackTensorLevel(tidLvls.front());
602 Value t = tensors[tid];
603
604 // Extract and iterate over the iteration space.
605 ExtractIterSpaceOp extractSpaceOp =
606 lvl == 0 ? ExtractIterSpaceOp::create(builder, loc, t)
607 : ExtractIterSpaceOp::create(builder, loc, t,
608 spIterVals[tid][lvl - 1], lvl);
609
610 IterateOp iterOp = IterateOp::create(
611 builder, loc, extractSpaceOp.getExtractedSpace(), reduc);
612 spIterVals[tid][lvl] = iterOp.getIterator();
613
614 // Update the reduction variables.
615 llvm::copy(iterOp.getRegionIterArgs(), reduc.begin());
616 // Set the insertion point to loop body.
617 builder.setInsertionPointToStart(iterOp.getBody());
618 loopStack.emplace_back(tidLvls, iterOp, builder.getInsertionBlock(),
619 iterOp.getCrds().front(), loopTag);
620 return iterOp;
621 }
622
623 // CoIteration Loops.
624 SmallVector<Value> spaces;
625 for (auto [tid, lvl] : unpackTensorLevelRange(tidLvls)) {
626 Value t = tensors[tid];
627 ExtractIterSpaceOp extractSpaceOp =
628 lvl == 0 ? ExtractIterSpaceOp::create(builder, loc, t)
629 : ExtractIterSpaceOp::create(builder, loc, t,
630 spIterVals[tid][lvl - 1], lvl);
631 spaces.push_back(extractSpaceOp.getExtractedSpace());
632 }
633 auto coIterOp = CoIterateOp::create(builder, loc, spaces, reduc, numCases);
634 // The CoIterateOp has neither an insertion block nor an induction variable.
635 // TODO: the `struct LoopInfo` should be simplified after full migration.
636 loopStack.emplace_back(tidLvls, coIterOp, /*insertion block*/ nullptr,
637 /*induction variable*/ nullptr, loopTag);
638 return coIterOp;
639 }
640
641 // TODO: support multiple return on parallel for?
642 tryParallel = tryParallel && reduc.size() <= 1;
643
644 SmallVector<SparseIterator *> raIters;
645 SmallVector<SparseIterator *> spIters;
646 categorizeIterators(tidLvls, raIters, spIters);
647
648 // Only when there is at least one sparse condition do we really need the
649 // universal index.
650 // TODO: Maybe we should instead require the merger to pass in a valid value
651 // in the first place instead of adjusting it in LoopEmitter?
652 needsUniv = !spIters.empty() && needsUniv;
653 // The TensorLevel used for loop conditions.
654 // If there is any sparse level, we need to use the sparse condition.
655 // If all levels are dense, we can pick arbitrary one (dense slice-driven loop
656 // can be generated using a simple ForOp as well).
657 Operation *l = nullptr;
658 Value iv = nullptr;
659 SmallVector<TensorLevel> tls;
660
661 // Generates loops differently depending on whether we need a slice-driven
662 // loop or a simple level traversal loop.
663 if (shouldIteratedByForLoop(spIters) && !needsUniv) {
664 assert(spIters.size() <= 1);
665 SparseIterator &it = spIters.empty() ? *raIters.front() : *spIters.front();
666 std::tie(l, iv) =
667 emitForLoopOverTensorAtLvl(builder, loc, it, reduc, tryParallel);
668 tls.push_back(makeTensorLevel(it.tid, it.lvl));
669 } else {
670 for (auto *it : spIters) {
671 tls.push_back(makeTensorLevel(it->tid, it->lvl));
672 }
673
674 if (needsUniv)
675 for (auto *it : raIters)
676 tls.push_back(makeTensorLevel(it->tid, it->lvl));
677
678 std::tie(l, iv) =
679 emitWhileLoopOverTensorsAtLvls(builder, loc, spIters, reduc, needsUniv);
680 }
681
682 // Enter dense tensor levels.
683 for (SparseIterator *it : raIters)
684 it->locate(builder, loc, iv);
685
686 // NOTE: we can also prepare for next dim here in advance
687 // Pushes the loop into stack.
688 loopStack.emplace_back(tls, l, builder.getInsertionBlock(), iv, loopTag);
689 return l;
690}
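// Under SparseEmitStrategy::kSparseIterator, the single-tensor branch above
// emits sparse_tensor dialect loops instead of scf loops, roughly (assembly
// abbreviated; see the ExtractIterSpaceOp/IterateOp definitions for the exact
// syntax):
//
//   %space = sparse_tensor.extract_iteration_space %t ...
//   %res = sparse_tensor.iterate %it in %space ... iter_args(%red = %init) {
//     ...
//   }
//
// and the multi-tensor branch builds a sparse_tensor.coiterate with one region
// per requested case.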
691
692void LoopEmitter::locateLvlAtAffineAddress(OpBuilder &builder, Location loc,
693 TensorLevel tidLvl,
694 AffineExpr lvlExpr) {
695 auto [tid, lvl] = unpackTensorLevel(tidLvl);
696
697 const SparseIterator *parent =
698 lvl == 0 ? nullptr : iters[tid][lvl - 1].back().get();
699 auto &it = getCurIterator(tid, lvl);
700 it.genInit(builder, loc, parent);
701
702 assert(it.kind == IterKind::kTrivial && it.randomAccessible());
703 Value lvlCrd = genAffine(builder, loc, lvlExpr);
704 it.locate(builder, loc, lvlCrd);
705}
706
707void LoopEmitter::prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc,
708 TensorId tid, Level lvl) {
709 // if this is the first level, there is no parent iterator for the current
710 // iterator.
711 // If the current iterator is a subsection-based iterator, the parent iterator
712 // is memorized by the iterator.
713 bool hasParent = lvl == 0 || !dependentLvlMap[tid][lvl].empty();
714
715 const SparseIterator *parent =
716 hasParent ? nullptr : iters[tid][lvl - 1].back().get();
717 auto &it = getCurIterator(tid, lvl);
718 it.genInit(builder, loc, parent);
719
720 // Locate the random-access iterator at position 0.
721 if (it.randomAccessible())
722 it.locate(builder, loc, C_IDX(0));
723}
724
725void LoopEmitter::exitForLoop(RewriterBase &rewriter, Location loc,
726 MutableArrayRef<Value> reduc) {
727 const LoopInfo &loopInfo = loopStack.back();
728 if (emitStrategy == SparseEmitStrategy::kSparseIterator) {
729 auto iterateOp = llvm::cast<IterateOp>(loopInfo.loop);
730 assert(reduc.size() == iterateOp.getNumResults());
731 sparse_tensor::YieldOp::create(rewriter, loc, reduc);
732 // Exit the loop.
733 rewriter.setInsertionPointAfter(iterateOp);
734 // In-place update reduction variables.
735 llvm::copy(iterateOp.getResults(), reduc.begin());
736 return;
737 }
738 if (auto forOp = llvm::dyn_cast<scf::ForOp>(loopInfo.loop)) {
739 if (!reduc.empty()) {
740 assert(reduc.size() == forOp.getNumResults());
741 scf::YieldOp::create(rewriter, loc, reduc);
742 }
743 // Exit the loop.
744 rewriter.setInsertionPointAfter(forOp);
745 // In-place update reduction variables.
746 llvm::copy(forOp.getResults(), reduc.begin());
747 } else {
748 auto parOp = llvm::cast<scf::ParallelOp>(loopInfo.loop);
749 if (!reduc.empty()) {
750 assert(reduc.size() == parOp.getInitVals().size() && reduc.size() == 1);
751 Operation *redExp = reduc.front().getDefiningOp();
752 // Reduction expression should have no use.
753 assert(redExp->getUses().empty());
754 // This must be a binary operation.
755 // NOTE: It is the user's responsibility to ensure the operation is
756 // commutative.
757 assert(redExp->getNumOperands() == 2 && redExp->getNumResults() == 1);
758
759 Value redVal = parOp.getInitVals().front();
760 Value curVal;
761 if (redExp->getOperand(0) == redVal)
762 curVal = redExp->getOperand(1);
763 else if (redExp->getOperand(1) == redVal)
764 curVal = redExp->getOperand(0);
765 // One of the operands must be the init value (which is also the
766 // previous reduction value).
767 assert(curVal);
768#ifndef NDEBUG
769 // The reduction expression should be the only user of the reduction val
770 // inside the parallel for.
771 unsigned numUsers = 0;
772 for (Operation *op : redVal.getUsers()) {
773 if (op->getParentOp() == parOp)
774 numUsers++;
775 }
776 assert(numUsers == 1);
777#endif // NDEBUG
778
779 rewriter.setInsertionPointAfter(redExp);
780 auto redOp = scf::ReduceOp::create(rewriter, loc, curVal);
781 // Attach to the reduction op.
782 Block *redBlock = &redOp.getReductions().front().front();
783 rewriter.setInsertionPointToEnd(redBlock);
784 Operation *newRed = rewriter.clone(*redExp);
785 // Replaces arguments of the reduction expression by using the block
786 // arguments from scf.reduce.
787 rewriter.modifyOpInPlace(
788 newRed, [&]() { newRed->setOperands(redBlock->getArguments()); });
789 // Erases the out-dated reduction expression.
790 rewriter.eraseOp(redExp);
791 rewriter.setInsertionPointToEnd(redBlock);
792 scf::ReduceReturnOp::create(rewriter, loc, newRed->getResult(0));
793 }
794 rewriter.setInsertionPointAfter(parOp);
795 // In-place update reduction variables.
796 for (unsigned i = 0, e = parOp.getResults().size(); i < e; i++)
797 reduc[i] = parOp.getResult(i);
798 }
799}
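// Sketch of the parallel-reduction rewrite above: if the loop body had built
// `%t = arith.addf %init, %x` (with %init the "handle" init value), that op is
// cloned into the scf.reduce region and rewired to the region's block
// arguments, roughly:
//
//   scf.reduce(%x : f32) {
//   ^bb0(%lhs: f32, %rhs: f32):
//     %r = arith.addf %lhs, %rhs : f32
//     scf.reduce.return %r : f32
//   }
//
// after which the original (now out-of-date) expression is erased.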
800
801void LoopEmitter::exitWhileLoop(OpBuilder &builder, Location loc,
802 MutableArrayRef<Value> reduc) {
803 const LoopInfo &loopInfo = loopStack.back();
804 auto whileOp = llvm::cast<scf::WhileOp>(loopInfo.loop);
805 Value iv = loopInfo.iv;
806 Value one = C_IDX(1);
807
808 // Finalize the induction. Note that the induction could be performed
809 // in the individual if-branches to avoid re-evaluating the conditions.
810 // However, that would result in a rather elaborate forest of yield
811 // instructions during code generation. Moreover, performing the induction
812 // after the if-statements more closely resembles code generated by TACO.
813 SmallVector<Value> operands;
814 ValueRange whileRes = whileOp.getResults();
815
816 for (auto [tid, lvl] : unpackTensorLevelRange(loopInfo.tidLvls)) {
817 SparseIterator &it = getCurIterator(tid, lvl);
818 if (!it.randomAccessible()) {
819 // Forward the sparse iterator.
820 Value cmp = CMPI(eq, it.getCrd(), iv);
821 it.forwardIf(builder, loc, cmp);
822 operands.append(it.getCursor().begin(), it.getCursor().end());
823 // const Value newPos = whileOp->getResult(o++);
824 // Following loops continue iteration from the break point of the
825 // current while loop.
826 whileRes = it.linkNewScope(whileRes);
827 } else {
828 // Make sure randomly accessible (dense) iterator is set to the right
829 // position according to the universal index.
830 Value uniIdx = whileOp.getResults().back();
831 it.locate(builder, loc, uniIdx);
832 }
833 }
834
835 // Reduction value from users.
836 for (auto &i : reduc) {
837 operands.push_back(i);
838 // Update user reduction variables.
839 i = whileRes.front();
840 whileRes = whileRes.drop_front();
841 }
842
843 // An (optional) universal index.
844 if (operands.size() < whileOp.getNumResults()) {
845 assert(operands.size() + 1 == whileOp.getNumResults());
846 // The last one is the universal index.
847 operands.push_back(ADDI(iv, one));
848 // Update the loop starting point of the current loop sequence.
849 loopSeqStack.back().first = whileOp->getResults().back();
850 }
851
852 if (!operands.empty())
853 YIELD(operands);
854
855 builder.setInsertionPointAfter(whileOp);
856}
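// In short: at the end of the while body, each sparse iterator is forwarded
// only if it produced the current coordinate, dense iterators are re-located
// from the universal index, and the optional universal index is advanced by
// one, conceptually:
//
//   %isCur    = arith.cmpi eq, %crd_t, %iv : index  // forward only the matches
//   %univNext = arith.addi %iv, %c1 : index         // optional universal index
//
// so that the next iteration resumes from the smallest unvisited coordinate.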
857
858void LoopEmitter::exitCurrentLoop(RewriterBase &rewriter, Location loc,
859 MutableArrayRef<Value> reduc) {
860 // Clean up the values; this helps us discover potential bugs at an earlier
861 // stage (instead of silently using a wrong value).
862 const LoopInfo &loopInfo = loopStack.back();
863 if (emitStrategy == SparseEmitStrategy::kSparseIterator) {
864 Operation *p = loopInfo.loop;
865 if (isa<IterateOp>(p))
866 sparse_tensor::YieldOp::create(rewriter, loc, reduc);
867
868 // Exit the loop.
869 rewriter.setInsertionPointAfter(p);
870 // In-place update reduction variables.
871 llvm::copy(p->getResults(), reduc.begin());
872 loopStack.pop_back();
873 return;
874 }
875
876 // Sets the insertion point to the right position.
877 rewriter.setInsertionPointToEnd(loopInfo.userCodeBlock);
878 if (!loopInfo.userCodeBlock->empty() &&
879 llvm::isa<scf::YieldOp>(&loopInfo.userCodeBlock->back())) {
880 // scf::While/For inserts an implicit yield op when there are no loop
881 // iter args. In this case, we need to insert the code before the yield.
882 assert(loopInfo.userCodeBlock->back().getNumResults() == 0);
883 rewriter.setInsertionPoint(&loopInfo.userCodeBlock->back());
884 }
885
886 if (llvm::isa<scf::WhileOp>(loopInfo.loop)) {
887 exitWhileLoop(rewriter, loc, reduc);
888 } else {
889 exitForLoop(rewriter, loc, reduc);
890 }
891
892 assert(loopStack.size() == loopSeqStack.size());
893 loopStack.pop_back();
894}
895
896//===----------------------------------------------------------------------===//
897// Loop generation utils
898//===----------------------------------------------------------------------===//
899
900std::pair<Operation *, Value> sparse_tensor::genCoIteration(
901 OpBuilder &builder, Location loc, ArrayRef<SparseIterator *> spIters,
902 MutableArrayRef<Value> reduc, Value uniIdx, bool userReducFirst) {
903 // NOTE: the slice driven tensor-related reduction variable must
904 // appear before normal tensors.
905
906 // The set of induction variables for the while loop.
907 SmallVector<Value> ivs;
908
909 // TODO: remove the flag after full migration. Currently
910 // `sparse_tensor.coiterate` operation (must) puts user-provided reduction
911 // values at the front of the block argument list, while direct sparsification to scf
912 // loops put them at the end.
913 if (userReducFirst)
914 ivs.append(reduc.begin(), reduc.end());
915
916 // Construct the while-loop with a parameter for each coordinate.
917 for (SparseIterator *it : spIters) {
918 ValueRange itVals = it->getCursor();
919 ivs.append(itVals.begin(), itVals.end());
920 }
921
922 if (!userReducFirst)
923 ivs.append(reduc.begin(), reduc.end());
924
925 // Update universal index.
926 if (uniIdx)
927 ivs.push_back(uniIdx);
928
929 // Ensures all operands are valid.
930 assert(!llvm::is_contained(ivs, nullptr));
931 TypeRange types = ValueRange(ivs).getTypes();
932 auto whileOp = scf::WhileOp::create(builder, loc, types, ivs);
933
934 SmallVector<Location> locs(types.size(), loc);
935 Block *before = builder.createBlock(&whileOp.getBefore(), {}, types, locs);
936 Block *after = builder.createBlock(&whileOp.getAfter(), {}, types, locs);
937
938 // Generates loop conditions.
939 builder.setInsertionPointToStart(before);
940 ValueRange bArgs = before->getArguments();
941 Value whileCond = nullptr; // bool values for loop condition.
942
943 for (SparseIterator *it : spIters) {
944 auto [cond, remArgs] = it->genWhileCond(builder, loc, bArgs);
945 whileCond = !whileCond ? cond : ANDI(whileCond, cond);
946 bArgs = remArgs;
947 }
948 // The remaining block arguments are user-provided reduction values and an
949 // optional universal index. Make sure their sizes match.
950 assert(bArgs.size() == reduc.size() + (uniIdx ? 1 : 0));
951 scf::ConditionOp::create(builder, loc, whileCond, before->getArguments());
952
953 // Generates loop body.
954 builder.setInsertionPointToStart(after);
955 ValueRange aArgs = after->getArguments();
956
957 for (SparseIterator *it : spIters) {
958 aArgs = it->linkNewScope(aArgs);
959 // Dereference the iterator to cache the coordinate.
960 it->deref(builder, loc);
961 }
962
963 // In-place update on reduction variable.
964 for (unsigned i = 0, e = reduc.size(); i < e; i++)
965 reduc[i] = aArgs[i];
966
967 Value min;
968 // Finds the minimum coordinate
969 if (!uniIdx) {
970 for (SparseIterator *it : spIters) {
971 if (min) {
972 Value cmp = CMPI(ult, it->getCrd(), min);
973 min = SELECT(cmp, it->getCrd(), min);
974 } else {
975 min = it->getCrd();
976 }
977 }
978 } else {
979 // Otherwise, the universal index is the minimal position.
980 min = whileOp.getAfterArguments().back();
981 }
982
983 return {whileOp, min};
984}
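// Putting the pieces together, co-iterating two sparse iterators without a
// universal index yields a loop of roughly this shape (a sketch; cursor arity
// and types depend on the iterators involved):
//
//   %r:3 = scf.while (%p0 = ..., %p1 = ..., %red = %init) : (...) -> (...) {
//     %inb0 = <in-bounds test for iterator 0>   // genWhileCond
//     %inb1 = <in-bounds test for iterator 1>
//     %cond = arith.andi %inb0, %inb1 : i1
//     scf.condition(%cond) %p0, %p1, %red : ...
//   } do {
//   ^bb0(%p0: index, %p1: index, %red: ...):
//     // Deref both iterators, pick the minimum coordinate via cmpi/select,
//     // run the user body, then yield the (conditionally) forwarded cursors.
//   }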
985
986#undef CMPI
987#undef C_IDX
988#undef YIELD
989#undef ADDI
990#undef ANDI
991#undef SUBI
992#undef MULI
993#undef SELECT