#define CMPI(p, lhs, rhs)                                                      \
  (arith::CmpIOp::create(b, l, arith::CmpIPredicate::p, (lhs), (rhs))          \
       .getResult())

#define C_FALSE (constantI1(b, l, false))
#define C_TRUE (constantI1(b, l, true))
#define C_IDX(v) (constantIndex(b, l, (v)))
#define YIELD(vs) (scf::YieldOp::create(b, l, (vs)))
#define ADDI(lhs, rhs) (arith::AddIOp::create(b, l, (lhs), (rhs)).getResult())
#define ORI(lhs, rhs) (arith::OrIOp::create(b, l, (lhs), (rhs)).getResult())
#define ANDI(lhs, rhs) (arith::AndIOp::create(b, l, (lhs), (rhs)).getResult())
#define SUBI(lhs, rhs) (arith::SubIOp::create(b, l, (lhs), (rhs)).getResult())
#define MULI(lhs, rhs) (arith::MulIOp::create(b, l, (lhs), (rhs)).getResult())
#define MINUI(lhs, rhs) (arith::MinUIOp::create(b, l, (lhs), (rhs)).getResult())
#define REMUI(lhs, rhs) (arith::RemUIOp::create(b, l, (lhs), (rhs)).getResult())
#define DIVUI(lhs, rhs) (arith::DivUIOp::create(b, l, (lhs), (rhs)).getResult())
#define SELECT(c, lhs, rhs)                                                    \
  (arith::SelectOp::create(b, l, (c), (lhs), (rhs)).getResult())
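// The macros above are local shorthands for emitting arith/scf operations;
// they assume an OpBuilder `b` and a Location `l` in scope at the expansion
// site. For example,
//   Value notEnd = CMPI(ult, pos, posHi);
// expands, roughly, to
//   Value notEnd = arith::CmpIOp::create(
//       b, l, arith::CmpIPredicate::ult, pos, posHi).getResult();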
template <bool hasPosBuffer>
class SparseLevel : public SparseTensorLevel {
  // The buffer is either an array of size 2 (position and coordinate buffer)
  // or size 1 (coordinate buffer only), depending on whether the level needs
  // a position array.
  using BufferT = std::conditional_t<hasPosBuffer, std::array<Value, 2>,
                                     std::array<Value, 1>>;

public:
  // ...
  ValueRange getLvlBuffers() const override { return buffers; }

  Value peekCrdAt(OpBuilder &b, Location l, ValueRange batchPrefix,
                  Value iv) const override {
    // ... (loads the coordinate at `iv` from the coordinate buffer)
  }

protected:
  template <typename T = void,
            typename = std::enable_if_t<hasPosBuffer, T>>
  Value getPosBuf() const {
    return buffers[0];
  }

  Value getCrdBuf() const {
    if constexpr (hasPosBuffer)
      return buffers[1];
    else
      return buffers[0];
  }

  const BufferT buffers;
};
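// Design note: templating SparseLevel on `hasPosBuffer` lets the buffers be
// kept in a fixed-size std::array instead of a dynamically sized container,
// and the enable_if guard on getPosBuf() turns access to a non-existent
// position buffer into a compile-time error rather than a runtime one.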
class DenseLevel : public SparseTensorLevel {
public:
  DenseLevel(unsigned tid, Level lvl, Value lvlSize)
      : /* ... */ {}

  Value peekCrdAt(OpBuilder &, Location, ValueRange, Value) const override {
    llvm_unreachable("locate random-accessible level instead");
  }

  ValueRange getLvlBuffers() const override { return {}; }

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(parentPos.size() == 1 && "Dense level cannot be non-unique.");
    assert(!inPadZone && "Not implemented");
    Value p = parentPos.front();
    Value posLo = MULI(p, lvlSize);
    return {posLo, lvlSize};
  }
};
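// Dense levels are fully materialized: there are no position or coordinate
// buffers, and the range for a parent position p is simply the block
// [p * lvlSize, p * lvlSize + lvlSize), per the computation above. Locating
// a coordinate is pure arithmetic, which is why peekCrdAt is unreachable on
// random-accessible levels.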
class BatchLevel : public SparseTensorLevel {
public:
  BatchLevel(unsigned tid, Level lvl, Value lvlSize)
      : /* ... */ {}

  Value peekCrdAt(OpBuilder &, Location, ValueRange, Value) const override {
    llvm_unreachable("locate random-accessible level instead");
  }

  ValueRange getLvlBuffers() const override { return {}; }

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(!inPadZone && "Not implemented");
    assert(parentPos.size() == 1 && "Batch level cannot be non-unique.");
    // Each batch starts at offset zero within its own batch dimension.
    return {C_IDX(0), lvlSize};
  }
};
class CompressedLevel : public SparseLevel<true> {
public:
  CompressedLevel(unsigned tid, Level lvl, LevelType lt, Value lvlSize,
                  Value posBuffer, Value crdBuffer)
      : SparseLevel(tid, lvl, lt, lvlSize, {posBuffer, crdBuffer}) {}

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(parentPos.size() == 1 &&
           "compressed level must be the first non-unique level.");

    auto loadRange = [&b, l, parentPos, batchPrefix, this]() -> ValuePair {
      Value p = parentPos.front();
      // ... (loads pos[p] and pos[p + 1] from the position buffer)
    };

    if (inPadZone == nullptr)
      return loadRange();

    // The parent iterator may be inside a pad zone, in which case loading
    // from the position buffer would be out of bounds; guard the load and
    // yield an empty range instead.
    SmallVector<Type, 2> types{b.getIndexType(), b.getIndexType()};
    scf::IfOp posRangeIf = scf::IfOp::create(b, l, types, inPadZone, true);

    // Then branch: yield a "fake" empty range [0, 0).
    // ...
    scf::YieldOp::create(b, l, emptyRange);

    // Else branch: load and yield the actual range.
    // ...
    auto [pLo, pHi] = loadRange();
    // ...
    scf::YieldOp::create(b, l, loadedRange);

    // ...
    ValueRange posRange = posRangeIf.getResults();
    return {posRange.front(), posRange.back()};
  }
};
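// Note how the pad-zone guard composes with PadIterator below: a padded
// random-access parent appends an "in pad zone" i1 flag to its position, and
// this level turns that flag into the empty range [0, 0), so downstream
// loops simply iterate zero times instead of reading out-of-bounds
// positions.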
class LooseCompressedLevel : public SparseLevel<true> {
public:
  LooseCompressedLevel(unsigned tid, Level lvl, LevelType lt, Value lvlSize,
                       Value posBuffer, Value crdBuffer)
      : SparseLevel(tid, lvl, lt, lvlSize, {posBuffer, crdBuffer}) {}

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(parentPos.size() == 1 &&
           "loose-compressed level must be the first non-unique level.");
    assert(!inPadZone && "Not implemented");
    Value p = parentPos.front();
    // ... (a loose-compressed level stores separate lo/hi entries per parent
    //      position, so this loads pos[2 * p] and pos[2 * p + 1])
  }
};
class SingletonLevel : public SparseLevel<false> {
public:
  SingletonLevel(unsigned tid, Level lvl, LevelType lt, Value lvlSize,
                 Value crdBuffer)
      : SparseLevel(tid, lvl, lt, lvlSize, {crdBuffer}) {}

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(parentPos.size() == 1 || parentPos.size() == 2);
    assert(!inPadZone && "Not implemented");
    Value p = parentPos.front();
    Value segHi = parentPos.size() == 2 ? parentPos.back() : nullptr;

    if (segHi == nullptr)
      return {p, ADDI(p, C_IDX(1))};
    // Use the segment high as the upper bound of the duplicated segment.
    return {p, segHi};
  }

  std::pair<Value, Value> collapseRangeBetween(
      OpBuilder &b, Location l, ValueRange batchPrefix,
      std::pair<Value, Value> parentRange) const override {
    // ...
  }
};
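// A singleton level stores exactly one coordinate per parent position, so
// its range is normally the single entry [p, p + 1). When the parent is a
// non-unique level, a segment high may accompany the position (parentPos of
// size 2), in which case the whole duplicated segment [p, segHi) is exposed.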
class NOutOfMLevel : public SparseLevel<false> {
public:
  NOutOfMLevel(unsigned tid, Level lvl, LevelType lt, Value lvlSize,
               Value crdBuffer)
      : SparseLevel(tid, lvl, lt, lvlSize, {crdBuffer}) {}

  std::pair<Value, Value> peekRangeAt(OpBuilder &b, Location l,
                                      ValueRange batchPrefix,
                                      ValueRange parentPos,
                                      Value inPadZone) const override {
    assert(parentPos.size() == 1 && isUnique() &&
           "n:m level cannot be non-unique.");
    assert(!inPadZone && "Not implemented");
    // ... (the range covers the fixed-size n:m block of the parent position)
  }
};
// Generates code that executes `builder` only when the iterator `it` has not
// reached its end, and otherwise yields `elseRet`.
static scf::ValueVector genWhenInBound(
    OpBuilder &b, Location l, SparseIterator &it, ValueRange elseRet,
    llvm::function_ref<scf::ValueVector(OpBuilder &, Location, Value)>
        builder) {
  // ...
  auto ifOp = scf::IfOp::create(b, l, ifRetTypes, it.genNotEnd(b, l), true);
  // ... (then branch: deref `it` and invoke `builder` on the coordinate;
  //      else branch: yield `elseRet`)
  return ifOp.getResults();
}
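// Typical use, mirroring the call sites later in this file: guard a
// coordinate test so it only executes while the iterator is in bounds, with
// C_FALSE as the out-of-bounds fallback:
//
//   auto r = genWhenInBound(
//       b, l, *wrap, /*elseRet=*/C_FALSE,
//       [this](OpBuilder &b, Location l, Value wrapCrd) -> scf::ValueVector {
//         Value crd = fromWrapCrd(b, l, wrapCrd);
//         return {CMPI(ult, crd, size)};
//       });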
class ConcreteIterator : public SparseIterator {
protected:
  ConcreteIterator(const SparseTensorLevel &stl, // ...
                   unsigned cursorValCnt)
      : // ...
        stl(stl), cursorValsStorage(cursorValCnt, nullptr) {
    assert(getCursor().size() == cursorValCnt);
  }

public:
  bool isBatchIterator() const override {
    // ...
  }
  bool randomAccessible() const override {
    // ...
  }
  // ...
  const SparseTensorLevel &stl;
  SmallVector<Value> cursorValsStorage;
};
// A trivial iterator whose cursor is a single position value.
class TrivialIterator : public ConcreteIterator {
public:
  // ...
  std::string getDebugInterfacePrefix() const override {
    return std::string("trivial<") + stl.toString() + ">";
  }
  // ...
  SmallVector<Value> serialize() const override {
    SmallVector<Value> ret;
    ret.push_back(getItPos());
    if (randomAccessible()) {
      // The upper bound is implicit for random-access iterators, but posLo
      // must be memorized for deserialization.
      ret.push_back(posLo);
    } else {
      ret.push_back(posHi);
    }
    return ret;
  }

  void deserialize(ValueRange vs) override {
    assert(vs.size() == 2);
    // ...
    if (randomAccessible())
      // ... (restores posLo; otherwise restores posHi)
  }

  ValuePair genForCond(OpBuilder &b, Location l) override {
    if (randomAccessible())
      return {deref(b, l), upperBound(b, l)};
    return std::make_pair(getItPos(), posHi);
  }

  Value genNotEndImpl(OpBuilder &b, Location l) override {
    return CMPI(ult, getItPos(), posHi);
  }

  Value derefImpl(OpBuilder &b, Location l) override {
    if (randomAccessible()) {
      // The coordinate is relative to the lower bound on random-access
      // levels.
      updateCrd(SUBI(getItPos(), posLo));
    } else {
      updateCrd(stl.peekCrdAt(b, l, getBatchCrds(), getItPos()));
    }
    return getCrd();
  }

  ValueRange forwardIf(OpBuilder &b, Location l, Value cond) override {
    // Forward unconditionally, then select between the new and the old
    // position based on `cond`.
    Value curPos = getCursor().front();
    Value nxPos = forward(b, l).front();
    seek(SELECT(cond, nxPos, curPos));
    return getCursor();
  }

  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    assert(randomAccessible());
    // Seek to the appropriate position.
    seek(ADDI(crd, posLo));
    updateCrd(crd);
    if (isBatchIterator()) {
      // If this is a batch iterator, also update the batch coordinate.
      assert(batchCrds.size() > lvl);
      batchCrds[lvl] = crd;
    }
  }

  Value getItPos() const { return getCursor().front(); }

  Value posLo, posHi;
};
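// Cursor layout for TrivialIterator: a single value, the current position.
// [posLo, posHi) delimits the level range; on random-access levels the
// coordinate is recovered arithmetically as pos - posLo, while sparse levels
// read it from the coordinate buffer via peekCrdAt.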
// An iterator for levels with duplicated coordinates. The cursor holds two
// values: the current position and the segment-high position.
class DedupIterator : public ConcreteIterator {
public:
  // ...
  // (the constructor seeds the cursor with the first segment:
  //    seek({posLo, genSegmentHigh(b, l, posLo)});)
  // ...
  std::string getDebugInterfacePrefix() const override {
    return std::string("dedup<") + stl.toString() + ">";
  }
  // ...
  void genInitImpl(OpBuilder &b, Location l,
                   const SparseIterator *parent) override {
    // ...
    Value posLo;
    std::tie(posLo, posHi) = stl.peekRangeAt(b, l, batchPrefix, pPos);
    seek({posLo, genSegmentHigh(b, l, posLo)});
  }

  SmallVector<Value> serialize() const override {
    SmallVector<Value> ret;
    ret.append(getCursor().begin(), getCursor().end());
    ret.push_back(posHi);
    return ret;
  }
  void deserialize(ValueRange vs) override {
    assert(vs.size() == 3);
    seek(vs.take_front(getCursor().size()));
    // ... (restores posHi)
  }

  Value genNotEndImpl(OpBuilder &b, Location l) override {
    return CMPI(ult, getPos(), posHi);
  }

  Value derefImpl(OpBuilder &b, Location l) override {
    updateCrd(stl.peekCrdAt(b, l, getBatchCrds(), getPos()));
    return getCrd();
  }

  ValueRange forwardImpl(OpBuilder &b, Location l) override {
    // Skip the entire segment of duplicated coordinates.
    Value nxPos = getSegHi();
    seek({nxPos, genSegmentHigh(b, l, nxPos)});
    return getCursor();
  }

  Value getPos() const { return getCursor()[0]; }
  Value getSegHi() const { return getCursor()[1]; }

  Value posHi;
};
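// Example of the dedup semantics: for a coordinate array [2, 2, 2, 5] with
// posLo = 0, genSegmentHigh returns 3, so the cursor starts as {0, 3}; deref
// yields coordinate 2, and forward jumps straight to {3, 4}. The iterator
// thus presents each distinct coordinate exactly once.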
// A simple wrapper that forwards every interface method to the wrapped
// iterator.
class SimpleWrapIterator : public SparseIterator {
public:
  SimpleWrapIterator(std::unique_ptr<SparseIterator> &&wrap, // ...
                     unsigned extraCursorVal = 0)
      : /* ... */ {}

  void setSparseEmitStrategy(SparseEmitStrategy strategy) override {
    wrap->setSparseEmitStrategy(strategy);
  }
  SparseEmitStrategy getSparseEmitStrategy() const override {
    return wrap->getSparseEmitStrategy();
  }
  SmallVector<Type> getCursorValTypes(OpBuilder &b) const override {
    return wrap->getCursorValTypes(b);
  }
  bool isBatchIterator() const override { return wrap->isBatchIterator(); }
  bool randomAccessible() const override { return wrap->randomAccessible(); }
  bool iteratableByFor() const override { return wrap->iteratableByFor(); }
  // ...
  ValueRange getCurPosition() const override { return wrap->getCurPosition(); }

  void genInitImpl(OpBuilder &b, Location l,
                   const SparseIterator *parent) override {
    wrap->genInit(b, l, parent);
  }
  Value genNotEndImpl(OpBuilder &b, Location l) override {
    return wrap->genNotEndImpl(b, l);
  }
  ValueRange forwardImpl(OpBuilder &b, Location l) override {
    return wrap->forward(b, l);
  }
  Value upperBound(OpBuilder &b, Location l) const override {
    return wrap->upperBound(b, l);
  }
  Value derefImpl(OpBuilder &b, Location l) override {
    return wrap->derefImpl(b, l);
  }
  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    wrap->locate(b, l, crd);
  }

protected:
  std::unique_ptr<SparseIterator> wrap;
};
// An iterator that filters another iterator by an affine constraint
// (offset, stride, size).
class FilterIterator : public SimpleWrapIterator {
  Value fromWrapCrd(OpBuilder &b, Location l, Value wrapCrd) const {
    return DIVUI(SUBI(wrapCrd, offset), stride);
  }
  Value toWrapCrd(OpBuilder &b, Location l, Value crd) const {
    return ADDI(MULI(crd, stride), offset);
  }
  // ...

public:
  FilterIterator(std::unique_ptr<SparseIterator> &&wrap, Value offset,
                 Value stride, Value size)
      : SimpleWrapIterator(std::move(wrap) /* ... */), offset(offset),
        stride(stride), size(size) {}
  // ...
  std::string getDebugInterfacePrefix() const override {
    return std::string("filter<") + wrap->getDebugInterfacePrefix() + ">";
  }

  bool iteratableByFor() const override { return randomAccessible(); }

  void genInitImpl(OpBuilder &b, Location l,
                   const SparseIterator *parent) override {
    wrap->genInit(b, l, parent);
    if (!randomAccessible()) {
      // Forward to the first legitimate coordinate (if any).
      forwardIf(b, l, genShouldFilter(b, l));
    } else {
      // A random-access iterator can directly locate the offset.
      wrap->locate(b, l, offset);
    }
  }

  Value derefImpl(OpBuilder &b, Location l) override {
    updateCrd(fromWrapCrd(b, l, wrap->deref(b, l)));
    return getCrd();
  }

  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    assert(randomAccessible());
    wrap->locate(b, l, toWrapCrd(b, l, crd));
    updateCrd(crd);
  }
  // ...
  Value offset, stride, size;
};
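// The filter exposes a compacted coordinate space: wrapped coordinate
// wrapCrd maps to crd = (wrapCrd - offset) / stride, and only wrapped
// coordinates with wrapCrd >= offset, (wrapCrd - offset) % stride == 0, and
// crd < size are legitimate. For example, with offset = 1, stride = 3, and
// size = 4, the wrapped coordinates {1, 4, 7, 10} appear as crd = {0, 1, 2,
// 3}.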
// An iterator that pads another iterator with `padLow` leading and `padHigh`
// trailing coordinates.
class PadIterator : public SimpleWrapIterator {
public:
  PadIterator(std::unique_ptr<SparseIterator> &&wrap, Value padLow,
              Value padHigh)
      : SimpleWrapIterator(std::move(wrap), // ...
                           wrap->randomAccessible() ? 1 : 0),
        padLow(padLow), padHigh(padHigh) {}
  // ...
  std::string getDebugInterfacePrefix() const override {
    return std::string("pad<") + wrap->getDebugInterfacePrefix() + ">";
  }

  ValuePair genForCond(OpBuilder &b, Location l) override {
    if (randomAccessible())
      return {getCrd(), upperBound(b, l)};
    return wrap->genForCond(b, l);
  }
  // ...
  ValueRange getCurPosition() const override { return getCursor(); }

  SmallVector<Type> getCursorValTypes(OpBuilder &b) const override {
    // ...
    if (randomAccessible())
      // ... (appends an extra i1 cursor value that encodes "in pad zone")
  }

  // The upper bound after padding becomes `size + padLow + padHigh`.
  Value upperBound(OpBuilder &b, Location l) const override {
    return ADDI(ADDI(wrap->upperBound(b, l), padLow), padHigh);
  }

  // The deref'ed coordinate must be shifted by `padLow`.
  Value derefImpl(OpBuilder &b, Location l) override {
    updateCrd(ADDI(wrap->deref(b, l), padLow));
    return getCrd();
  }

  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    assert(randomAccessible());
    wrap->locate(b, l, SUBI(crd, padLow));
    // ... (computes inPadLow/inPadHigh from crd)
    getMutCursorVals().back() = ORI(inPadLow, inPadHigh);
    updateCrd(crd);
  }

  Value padLow, padHigh;
};
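// Padding shifts every wrapped coordinate up by padLow and extends the upper
// bound by padLow + padHigh; no elements are materialized for the pad zones.
// For a random-access wrapped iterator, the extra cursor value set in
// locateImpl records whether the located coordinate landed inside a pad
// zone; CompressedLevel::peekRangeAt (above) consumes that flag to produce
// an empty position range instead of an out-of-bounds load.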
// An iterator over the non-empty subsections of a sparse tensor slice.
class NonEmptySubSectIterator : public SparseIterator {
public:
  // (alias reconstructed from the call sites below)
  using TraverseBuilder = llvm::function_ref<scf::ValueVector(
      OpBuilder &, Location, const SparseIterator *, ValueRange)>;

  NonEmptySubSectIterator(OpBuilder &b, Location l,
                          const SparseIterator *parent,
                          std::unique_ptr<SparseIterator> &&delegate,
                          Value subSectSz)
      : // ...
        parent(parent), delegate(std::move(delegate)),
        tupleSz(this->delegate->serialize().size()), subSectSz(subSectSz) {
    auto *p = dyn_cast_or_null<NonEmptySubSectIterator>(parent);
    if (p == nullptr) {
      // This is the root subsection: at most one tuple is needed.
      maxTupleCnt = C_IDX(1);
    } else if (p->lvl == lvl) {
      // Two subsections on the same level; not yet supported.
      maxTupleCnt = p->maxTupleCnt;
      assert(false && "Not implemented.");
    } else {
      // A subsection at a deeper level: every parent tuple may expand into
      // up to subSectSz tuples here.
      assert(p->lvl + 1 == lvl);
      maxTupleCnt = MULI(p->maxTupleCnt, p->subSectSz);
    }
    // The position buffer is only needed for non-random-accessible
    // iterators.
    if (randomAccessible())
      return;
    subSectPosBuf = allocSubSectPosBuf(b, l);
  }

  std::string getDebugInterfacePrefix() const override {
    return std::string("ne_sub<") + delegate->getDebugInterfacePrefix() + ">";
  }
  // ...
  // Allocates a buffer that caches the serialized cursor of the delegate for
  // every tuple, plus one extra slot for the next-level tuple start.
  Value allocSubSectPosBuf(OpBuilder &b, Location l) {
    return memref::AllocaOp::create(
        b, l, /* ... a memref sized maxTupleCnt x (tupleSz + 1) ... */);
  }

  void storeNxLvlStart(OpBuilder &b, Location l, Value tupleId,
                       Value start) const {
    memref::StoreOp::create(b, l, start, subSectPosBuf,
                            /* ... {tupleId, tupleSz} ... */);
  }

  Value loadNxLvlStart(OpBuilder &b, Location l, Value tupleId) const {
    return memref::LoadOp::create(b, l, subSectPosBuf,
                                  /* ... {tupleId, tupleSz} ... */);
  }

  void storeCursorVals(OpBuilder &b, Location l, Value tupleId,
                       ValueRange itVals) const {
    assert(itVals.size() == tupleSz);
    for (unsigned i = 0; i < tupleSz; i++) {
      memref::StoreOp::create(b, l, itVals[i], subSectPosBuf,
                              /* ... {tupleId, i} ... */);
    }
  }

  SmallVector<Value> loadCursorVals(OpBuilder &b, Location l,
                                    Value tupleId) const {
    SmallVector<Value> ret;
    for (unsigned i = 0; i < tupleSz; i++) {
      Value v = memref::LoadOp::create(b, l, subSectPosBuf,
                                       /* ... {tupleId, i} ... */);
      ret.push_back(v);
    }
    return ret;
  }

  bool isSubSectRoot() const {
    return !parent || !llvm::isa<NonEmptySubSectIterator>(parent);
  }

  // Expands the subsection tree, visiting every leaf via `builder`.
  ValueRange inflateSubSectTree(OpBuilder &b, Location l, ValueRange reduc,
                                TraverseBuilder builder) const;

  bool isBatchIterator() const override { return delegate->isBatchIterator(); }
  bool randomAccessible() const override {
    return delegate->randomAccessible();
  }
  bool iteratableByFor() const override { return randomAccessible(); }
  Value upperBound(OpBuilder &b, Location l) const override {
    auto *p = dyn_cast_or_null<NonEmptySubSectIterator>(parent);
    return p && p->lvl == lvl ? p->upperBound(b, l)
                              : delegate->upperBound(b, l);
  }
  // ...
  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    // ...
    delegate->locate(b, l, absOff);
    // ...
    assert(parent->lvl + 1 == lvl);
    // ...
  }

  Value toSubSectCrd(OpBuilder &b, Location l, Value wrapCrd) const {
    return SUBI(wrapCrd, getAbsOff());
  }

  Value derefImpl(OpBuilder &b, Location l) override {
    Value crd;
    auto *p = dyn_cast_or_null<NonEmptySubSectIterator>(parent);
    if (p && p->lvl == lvl)
      crd = SUBI(getAbsOff(), p->getAbsOff());
    // ... (otherwise the coordinate is the absolute offset itself)
  }

  Value getMinCrd() const { return subSectMeta[0]; }
  Value getAbsOff() const { return subSectMeta[1]; }
  Value getNotEnd() const { return subSectMeta[2]; }

  const SparseIterator *parent;
  std::unique_ptr<SparseIterator> delegate;

  // The number of values required to serialize the wrapped iterator.
  const unsigned tupleSz;
  // The maximum and the current number of tuples in the position buffer.
  Value maxTupleCnt, tupleCnt;
  // The memref storing the serialized cursors (see allocSubSectPosBuf).
  Value subSectPosBuf;

  const Value subSectSz;
  // The cursor values, as indexed by the getters above:
  // (minCrd, absOff, notEnd).
  Value subSectMeta[3];
};
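// The subsection machinery in a nutshell: the delegate iterates one slice
// window, and its cursor is snapshotted ("serialized") into subSectPosBuf as
// a tupleSz-wide tuple per non-empty window. maxTupleCnt grows by a factor
// of subSectSz per nested level, matching the worst case where every parent
// tuple expands into a full window. The 3-value cursor (minCrd, absOff,
// notEnd) tracks the minimum coordinate over all cached tuples, the absolute
// offset of the current window, and the not-at-end flag.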
class SubSectIterator;

// A helper that shares the traversal code between NonEmptySubSectIterator
// (which owns the delegate) and SubSectIterator (which owns a wrapped
// iterator).
struct SubSectIterHelper {
  explicit SubSectIterHelper(const SubSectIterator &iter);
  explicit SubSectIterHelper(const NonEmptySubSectIterator &subSect);

  // ... (deserializeFromTupleId, locate, genNotEnd, deref, forward;
  //      definitions appear later in this file)

  const NonEmptySubSectIterator &subSect;
  SparseIterator &wrap;
};
// An iterator that traverses a single non-empty subsection.
class SubSectIterator : public SparseIterator {
public:
  SubSectIterator(const NonEmptySubSectIterator &subSect,
                  const SparseIterator &parent,
                  std::unique_ptr<SparseIterator> &&wrap)
      : // ... (one extra cursor value for sparse iteration:
        //      wrap->randomAccessible() ? 0 : 1)
        subSect(subSect), wrap(std::move(wrap)), parent(parent),
        helper(*this) {
    assert(subSect.tid == tid && subSect.lvl == lvl);
    // ...
  }

  std::string getDebugInterfacePrefix() const override {
    return std::string("subsect<") + wrap->getDebugInterfacePrefix() + ">";
  }
  SmallVector<Type> getCursorValTypes(OpBuilder &b) const override {
    // ...
    if (!randomAccessible())
      // ... (an extra index cursor value counts the visited coordinates)
  }

  bool isBatchIterator() const override { return wrap->isBatchIterator(); }
  bool randomAccessible() const override { return wrap->randomAccessible(); }
  bool iteratableByFor() const override { return randomAccessible(); }
  Value upperBound(OpBuilder &b, Location l) const override {
    return subSect.subSectSz;
  }
  ValueRange getCurPosition() const override { return wrap->getCurPosition(); }

  Value getNxLvlTupleId(OpBuilder &b, Location l) const {
    if (randomAccessible()) {
      return ADDI(getCrd(), nxLvlTupleStart);
    }
    return ADDI(getCursor().back(), nxLvlTupleStart);
  }

  void genInitImpl(OpBuilder &b, Location l, const SparseIterator *) override {
    if (randomAccessible()) {
      if (auto *p = llvm::dyn_cast<SubSectIterator>(&parent)) {
        assert(p->lvl + 1 == lvl);
        wrap->genInit(b, l, p);
        // Linearize the dense subsection index.
        nxLvlTupleStart = MULI(subSect.subSectSz, p->getNxLvlTupleId(b, l));
      } else {
        assert(subSect.lvl == lvl && subSect.isSubSectRoot());
        wrap->deserialize(subSect.delegate->serialize());
        nxLvlTupleStart = C_IDX(0);
      }
      return;
    }
    assert(!randomAccessible());
    assert(getCursor().size() == wrap->getCursor().size() + 1);
    // The extra counter that counts the number of actually visited
    // coordinates in the sparse subsection.
    getMutCursorVals().back() = C_IDX(0);
    Value tupleId;
    if (auto *p = llvm::dyn_cast<SubSectIterator>(&parent)) {
      assert(p->lvl + 1 == lvl);
      tupleId = p->getNxLvlTupleId(b, l);
    } else {
      assert(subSect.lvl == lvl && subSect.isSubSectRoot());
      tupleId = C_IDX(0);
    }
    nxLvlTupleStart = subSect.loadNxLvlStart(b, l, tupleId);
    helper.deserializeFromTupleId(b, l, tupleId);
  }

  void locateImpl(OpBuilder &b, Location l, Value crd) override {
    helper.locate(b, l, crd);
    updateCrd(crd);
  }

  Value genNotEndImpl(OpBuilder &b, Location l) override {
    return helper.genNotEnd(b, l);
  }

  Value derefImpl(OpBuilder &b, Location l) override {
    Value crd = helper.deref(b, l);
    updateCrd(crd);
    return crd;
  }

  ValueRange forwardImpl(OpBuilder &b, Location l) override {
    helper.forward(b, l);
    assert(!randomAccessible());
    assert(getCursor().size() == wrap->getCursor().size() + 1);
    getMutCursorVals().back() = ADDI(getCursor().back(), C_IDX(1));
    return getCursor();
  }

  Value nxLvlTupleStart;

  const NonEmptySubSectIterator &subSect;
  std::unique_ptr<SparseIterator> wrap;
  const SparseIterator &parent;

  SubSectIterHelper helper;
};
// ... (from the surrounding, partially elided, iterator plumbing:)
//   args.push_back(crd);
// ...

// The default implementation of forwardIf: advance the cursor inside an
// scf.if so the iterator only moves when `cond` holds.
ValueRange SparseIterator::forwardIf(OpBuilder &b, Location l, Value cond) {
  auto ifOp = scf::IfOp::create(b, l, getCursor().getTypes(), cond, true);
  // ... (then branch: yield forward(b, l); else branch: yield the current
  //      cursor unchanged)
  seek(ifOp.getResults());
  return getCursor();
}

Value DedupIterator::genSegmentHigh(OpBuilder &b, Location l, Value pos) {
  auto whileOp = scf::WhileOp::create(
      b, l, /* ... starts the scan one past `pos` ... */
      /*beforeBuilder=*/
      [this, pos](OpBuilder &b, Location l, ValueRange ivs) {
        Value inBound = CMPI(ult, ivs.front(), posHi);
        auto ifInBound = scf::IfOp::create(b, l, b.getI1Type(), inBound, true);
        {
          OpBuilder::InsertionGuard guard(b);
          // If in bound, load the next coordinate and check whether it
          // duplicates the segment head.
          b.setInsertionPointToStart(ifInBound.thenBlock());
          Value headCrd = stl.peekCrdAt(b, l, getBatchCrds(), pos);
          Value tailCrd = stl.peekCrdAt(b, l, getBatchCrds(), ivs.front());
          Value isDup = CMPI(eq, headCrd, tailCrd);
          YIELD(isDup);
          // Else, the scan ran out of bounds; terminate.
          b.setInsertionPointToStart(ifInBound.elseBlock());
          YIELD(constantI1(b, l, false));
        }
        scf::ConditionOp::create(b, l, ifInBound.getResults()[0], ivs);
      },
      /*afterBuilder=*/
      // ... (advances the position by one and continues)
      );
  return whileOp.getResult(0);
}
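// genSegmentHigh scans forward from `pos` while the coordinate stays equal
// to crd[pos], i.e., it computes the exclusive end of the duplicate segment.
// The scf.while result is the first position whose coordinate differs, or
// posHi when the segment runs to the end of the level.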
Value FilterIterator::genCrdNotLegitPredicate(OpBuilder &b, Location l,
                                              Value wrapCrd) {
  Value crd = fromWrapCrd(b, l, wrapCrd);
  // Test whether the coordinate is on stride.
  Value notlegit = CMPI(ne, toWrapCrd(b, l, crd), wrapCrd);
  // Test wrapCrd < offset.
  notlegit = ORI(CMPI(ult, wrapCrd, offset), notlegit);
  // Test crd >= size.
  notlegit = ORI(CMPI(uge, crd, size), notlegit);
  return notlegit;
}

Value FilterIterator::genShouldFilter(OpBuilder &b, Location l) {
  auto r = genWhenInBound(
      b, l, *wrap, C_FALSE,
      [this](OpBuilder &b, Location l, Value wrapCrd) -> scf::ValueVector {
        Value notLegit = genCrdNotLegitPredicate(b, l, wrapCrd);
        return {notLegit};
      });
  return llvm::getSingleElement(r);
}

Value FilterIterator::genNotEndImpl(OpBuilder &b, Location l) {
  assert(!wrap->randomAccessible());
  auto r = genWhenInBound(
      b, l, *wrap, C_FALSE,
      [this](OpBuilder &b, Location l, Value wrapCrd) -> scf::ValueVector {
        Value crd = fromWrapCrd(b, l, wrapCrd);
        // crd < size
        return {CMPI(ult, crd, size)};
      });
  return llvm::getSingleElement(r);
}
ValueRange FilterIterator::forwardImpl(OpBuilder &b, Location l) {
  assert(!randomAccessible());
  // Generates a while loop that forwards the wrapped iterator until the
  // coordinate is legitimate, roughly:
  //   while (!it.end() && (!legit(*it) || isFirst)) it++;
  // The extra `isFirst` argument forces at least one step forward.
  // ...
  whileArgs.push_back(isFirst);
  auto whileOp = scf::WhileOp::create(
      b, l, ValueRange(whileArgs).getTypes(), whileArgs,
      /*beforeBuilder=*/
      [this](OpBuilder &b, Location l, ValueRange ivs) {
        // ... (while in bound, decide whether forwarding must continue:
        //        Value notLegit = genCrdNotLegitPredicate(b, l, wrapCrd);
        //        Value crd = fromWrapCrd(b, l, wrapCrd); ...)
        ret = ORI(ret, llvm::getSingleElement(isFirst));
        // ...
        scf::ConditionOp::create(b, l, cont.front(), ivs);
      },
      /*afterBuilder=*/
      [this](OpBuilder &b, Location l, ValueRange ivs) {
        // ...
        wrap->forward(b, l);
        // ...
        // The next iteration is no longer the first one.
        yieldVals.push_back(constantI1(b, l, false));
        // ...
      });

  linkNewScope(whileOp.getResults());
  return getCursor();
}
SubSectIterHelper::SubSectIterHelper(const NonEmptySubSectIterator &subSect)
    : subSect(subSect), wrap(*subSect.delegate) {}

SubSectIterHelper::SubSectIterHelper(const SubSectIterator &iter)
    : subSect(iter.subSect), wrap(*iter.wrap) {}

void SubSectIterHelper::deserializeFromTupleId(OpBuilder &b, Location l,
                                               Value tupleId) {
  assert(!subSect.randomAccessible());
  wrap.deserialize(subSect.loadCursorVals(b, l, tupleId));
}

void SubSectIterHelper::locate(OpBuilder &b, Location l, Value crd) {
  Value absCrd = ADDI(crd, subSect.getAbsOff());
  wrap.locate(b, l, absCrd);
}

Value SubSectIterHelper::genNotEnd(OpBuilder &b, Location l) {
  assert(!wrap.randomAccessible());
  auto r = genWhenInBound(
      b, l, wrap, C_FALSE,
      [this](OpBuilder &b, Location l, Value wrapCrd) -> scf::ValueVector {
        Value crd = SUBI(wrapCrd, subSect.getAbsOff());
        // crd < size
        return {CMPI(ult, crd, subSect.subSectSz)};
      });
  return llvm::getSingleElement(r);
}

Value SubSectIterHelper::deref(OpBuilder &b, Location l) {
  Value wrapCrd = wrap.deref(b, l);
  Value crd = subSect.toSubSectCrd(b, l, wrapCrd);
  return crd;
}

ValueRange SubSectIterHelper::forward(OpBuilder &b, Location l) {
  return wrap.forward(b, l);
}
ValueRange NonEmptySubSectIterator::inflateSubSectTree(
    OpBuilder &b, Location l, ValueRange reduc, TraverseBuilder builder) const {
  // Set up a helper to traverse a sparse subsection.
  SubSectIterHelper helper(*this);
  if (!randomAccessible()) {
    // The subsection tree is cached as tuples: for each tuple, deserialize
    // the delegate and visit every leaf below it.
    SmallVector<Value> iterArgs;
    iterArgs.push_back(C_IDX(0));
    iterArgs.append(reduc.begin(), reduc.end());
    auto forEachLeaf = scf::ForOp::create(
        b, l, /*lb=*/C_IDX(0), /*ub=*/tupleCnt, /*step=*/C_IDX(1), iterArgs,
        [&](OpBuilder &b, Location l, Value tupleId, ValueRange iterArgs) {
          // Deserialize the iterator at the cached position (tupleId).
          helper.deserializeFromTupleId(b, l, tupleId);

          Value cnt = iterArgs.front();
          // Record the number of leaves visited so far; this is the
          // starting tupleId for the next level.
          helper.subSect.storeNxLvlStart(b, l, tupleId, cnt);

          // ...
          whileArgs.append(iterArgs.begin(), iterArgs.end());

          auto whileOp = scf::WhileOp::create(
              b, l, ValueRange(whileArgs).getTypes(), whileArgs,
              /*beforeBuilder=*/
              [&helper](OpBuilder &b, Location l, ValueRange ivs) {
                helper.wrap.linkNewScope(ivs);
                scf::ConditionOp::create(b, l, helper.genNotEnd(b, l), ivs);
              },
              /*afterBuilder=*/
              [&](OpBuilder &b, Location l, ValueRange ivs) {
                ValueRange remIter = helper.wrap.linkNewScope(ivs);
                Value cnt = remIter.front();
                // ... (invoke `builder` on the leaf, bump the count, and
                //      forward the wrapped iterator)
                nxIter.append(userNx.begin(), userNx.end());
                YIELD(nxIter);
              });
          ValueRange res = helper.wrap.linkNewScope(whileOp.getResults());
          // ... (yields the remaining iteration arguments)
        });
    return forEachLeaf.getResults().drop_front();
  }

  // Else, this is a dense subsection: nothing is cached, simply traverse
  // the dense space recursively.
  assert(randomAccessible());
  auto visitDenseSubSect = [&, this](OpBuilder &b, Location l,
                                     const SparseIterator *parent,
                                     ValueRange reduc) {
    assert(!parent || parent->lvl + 1 == lvl);
    delegate->genInit(b, l, parent);
    auto forOp = scf::ForOp::create(
        b, l, /*lb=*/C_IDX(0), /*ub=*/subSectSz, /*step=*/C_IDX(1), reduc,
        [&](OpBuilder &b, Location l, Value crd, ValueRange iterArgs) {
          helper.locate(b, l, crd);
          // ... (invoke `builder` at the located position)
        });
    return forOp.getResults();
  };

  if (isSubSectRoot()) {
    return visitDenseSubSect(b, l, parent, reduc);
  }
  // Else, this is not the root; recurse until the root is reached.
  auto *p = llvm::cast<NonEmptySubSectIterator>(parent);
  assert(p->lvl + 1 == lvl);
  return p->inflateSubSectTree(b, l, reduc, visitDenseSubSect);
}
void TrivialIterator::genInitImpl(OpBuilder &b, Location l,
                                  const SparseIterator *parent) {
  if (isBatchIterator() && batchCrds.size() <= stl.lvl)
    batchCrds.resize(stl.lvl + 1, nullptr);
  // ...
  Value inPadZone = nullptr;
  // ...
  if (/* ... the parent is a padded random-access iterator ... */) {
    // The padded iterator appends an extra "in pad zone" flag to its
    // position; split it off before peeking the range.
    inPadZone = pPos.back();
    pPos = pPos.drop_back();
  }
  // ...
  std::tie(posLo, posHi) = stl.peekRangeAt(b, l, batchPrefix, pPos, inPadZone);
  // ... (seeks to posLo)
}
void NonEmptySubSectIterator::genInitImpl(OpBuilder &b, Location l,
                                          const SparseIterator *) {
  Value c0 = C_IDX(0);
  if (!isSubSectRoot()) {
    assert(parent->lvl + 1 == lvl);
    if (randomAccessible()) {
      // ... (a dense subsection needs no caching; seed a trivial cursor)
      return;
    }

    auto *p = cast<NonEmptySubSectIterator>(parent);
    // Expand the subsection tree from the parent level to this level: for
    // every parent tuple, initialize the delegate, cache its cursor, and
    // accumulate the minimum coordinate.
    // ... (seeds reduc with {minCrd = sentinel, tupleId = 0})
    ValueRange result = p->inflateSubSectTree(
        b, l, reduc,
        [this](OpBuilder &b, Location l, const SparseIterator *parent,
               ValueRange reduc) -> scf::ValueVector {
          assert(parent->lvl + 1 == lvl && reduc.size() == 2);
          Value minCrd = reduc.front();
          Value tupleId = reduc.back();

          // Initialize the subsection range.
          SubSectIterHelper helper(*this);
          helper.wrap.genInit(b, l, parent);

          // Update the minimum coordinate while the iterator is in bound.
          minCrd = genWhenInBound(b, l, helper.wrap, minCrd,
                                  [minCrd](OpBuilder &b, Location l,
                                           Value crd) -> scf::ValueVector {
                                    Value min = MINUI(crd, minCrd);
                                    return {min};
                                  })
                       .front();

          // Cache the serialized cursor for this tuple.
          storeCursorVals(b, l, tupleId, helper.wrap.serialize());
          // ... (increments tupleId)
          return {minCrd, tupleId};
        });
    assert(result.size() == 2);
    tupleCnt = result.back();

    Value minCrd = result.front();
    // ... (absOff = offsetFromMinCrd(b, l, minCrd, subSectSz);
    //      notEnd = whether a valid minimum coordinate was found)
    seek({minCrd, absOff, notEnd});
    return;
  }

  // This is the root of the subsection tree: it resolves to exactly one
  // node.
  assert(isSubSectRoot());
  delegate->genInit(b, l, parent);
  if (randomAccessible()) {
    // ...
    return;
  }

  // Only one root tuple is needed.
  tupleCnt = C_IDX(1);
  // Cache the sparse range.
  storeCursorVals(b, l, c0, delegate->serialize());
  // Compute the subsection metadata (minCrd, absOff, notEnd), falling back
  // to `elseRet` when the delegate is exhausted.
  // ... (elseRet = {c0, c0, C_FALSE})
  auto meta = genWhenInBound(
      b, l, *delegate, elseRet,
      [this](OpBuilder &b, Location l, Value crd) -> scf::ValueVector {
        Value offset = offsetFromMinCrd(b, l, crd, subSectSz);
        return {crd, offset, C_TRUE};
      });
  seek(meta);
}
ValueRange NonEmptySubSectIterator::forwardImpl(OpBuilder &b, Location l) {
  assert(!randomAccessible());
  Value c0 = C_IDX(0), c1 = C_IDX(1);
  // Forward to the next non-empty subsection:
  //   if (minCrd > absOff) {
  //     // Fast path: the window can slide without consuming minCrd.
  //     absOff += 1;
  //   } else {
  //     // Slow path: forward every cached iterator sitting at minCrd and
  //     // recompute the minimum coordinate.
  //   }
  Value fastPathP = CMPI(ugt, getMinCrd(), getAbsOff());
  auto ifOp = scf::IfOp::create(b, l, getCursor().getTypes(), fastPathP, true);
  {
    OpBuilder::InsertionGuard guard(b);
    // Fast path: yield (minCrd, absOff + 1, notEnd).
    // ...

    // Slow path: loop over all cached tuples.
    // ...
    auto loopNest = scf::buildLoopNest(
        b, l, c0, tupleCnt, c1, loopArgs,
        [this](OpBuilder &b, Location l, ValueRange ivs,
               ValueRange iterArgs) -> scf::ValueVector {
          Value tupleId = ivs.front();
          SubSectIterHelper helper(*this);
          helper.deserializeFromTupleId(b, l, tupleId);

          return genWhenInBound(
              b, l, *delegate, /*elseRet=*/iterArgs,
              [this, iterArgs, tupleId](OpBuilder &b, Location l,
                                        Value crd) -> scf::ValueVector {
                // Forward the delegate (only) if it sits exactly at minCrd.
                Value isMin = CMPI(eq, crd, getMinCrd());
                delegate->forwardIf(b, l, isMin);
                // Persist the forwarded cursor if it moved.
                auto ifIsMin = scf::IfOp::create(b, l, isMin, false);
                // ...
                storeCursorVals(b, l, tupleId, delegate->serialize());
                // ...
                // Accumulate the new minimum coordinate.
                Value nxMin = iterArgs[0];
                // ... (when still in bound:)
                Value nx = arith::MinUIOp::create(b, l, crd, nxMin);
                // ...
              });
        });

    scf::ForOp forOp = loopNest.loops.front();
    // ...
    Value nxMinCrd = forOp.getResult(0);
    Value nxNotEnd = forOp.getResult(1);
    // ... (nxAbsOff is recomputed from nxMinCrd; yield the new metadata)
  }

  Value nxMinCrd = ifOp.getResult(0);
  Value nxAbsOff = ifOp.getResult(1);
  Value nxNotEnd = ifOp.getResult(2);

  // The offset must advance by at least one.
  Value minAbsOff = ADDI(getAbsOff(), c1);
  nxAbsOff = arith::MaxUIOp::create(b, l, minAbsOff, nxAbsOff);

  seek(ValueRange{nxMinCrd, nxAbsOff, nxNotEnd});
  // The coordinate must not exceed the subsection's upper bound.
  Value crd = deref(b, l);
  nxNotEnd = ANDI(nxNotEnd, CMPI(ult, crd, upperBound(b, l)));

  seek(ValueRange{nxMinCrd, nxAbsOff, nxNotEnd});
  return getCursor();
}
SparseIterationSpace::SparseIterationSpace(
    Location l, OpBuilder &b, Value t, unsigned tid,
    std::pair<Level, Level> lvlRange, ValueRange parentPos)
    : /* ... */ {
  auto [lvlLo, lvlHi] = lvlRange;

  // ...
  if (parentPos.empty())
    // ... (defaults the parent position to zero)

  for (Level lvl = lvlLo; lvl < lvlHi; lvl++)
    lvls.emplace_back(makeSparseTensorLevel(b, l, t, tid, lvl));

  // The bound of the iteration space is determined by the first level, then
  // collapsed with every remaining level.
  bound = lvls.front()->peekRangeAt(b, l, {}, parentPos);
  for (auto &lvl : getLvlRef().drop_front())
    bound = lvl->collapseRangeBetween(b, l, {}, bound);
}
SparseIterationSpace SparseIterationSpace::fromValues(IterSpaceType dstTp,
                                                      ValueRange values,
                                                      unsigned tid) {
  // Reconstruct every sparse tensor level from the flattened value list.
  SparseIterationSpace space;
  for (auto [i, lt] : llvm::enumerate(dstTp.getLvlTypes())) {
    unsigned bufferCnt = 0;
    if (lt.isWithPosLT())
      bufferCnt++;
    if (lt.isWithCrdLT())
      bufferCnt++;
    // The sparse tensor buffers (position and/or coordinate arrays).
    ValueRange buffers = values.take_front(bufferCnt);
    values = values.drop_front(bufferCnt);

    // The level size.
    Value sz = values.front();
    values = values.drop_front();
    space.lvls.push_back(
        // (level index offset by the space's starting level if nonzero)
        makeSparseTensorLevel(lt, sz, buffers, tid, i));
  }
  // Two trailing values encode the iteration space bound.
  space.bound = std::make_pair(values[0], values[1]);
  values = values.drop_front(2);

  // All values must have been consumed by now.
  assert(values.empty());
  return space;
}
std::unique_ptr<SparseIterator>
SparseIterationSpace::extractIterator(OpBuilder &b, Location l) const {
  return makeSimpleIterator(b, l, *this);
}

std::unique_ptr<SparseTensorLevel>
sparse_tensor::makeSparseTensorLevel(LevelType lt, Value sz, ValueRange b,
                                     unsigned t, Level l) {
  switch (lt.getLvlFmt()) {
  case LevelFormat::Dense:
    return std::make_unique<DenseLevel>(t, l, sz);
  case LevelFormat::Batch:
    return std::make_unique<BatchLevel>(t, l, sz);
  case LevelFormat::Compressed:
    return std::make_unique<CompressedLevel>(t, l, lt, sz, b[0], b[1]);
  case LevelFormat::LooseCompressed:
    return std::make_unique<LooseCompressedLevel>(t, l, lt, sz, b[0], b[1]);
  case LevelFormat::Singleton:
    return std::make_unique<SingletonLevel>(t, l, lt, sz, b[0]);
  case LevelFormat::NOutOfM:
    return std::make_unique<NOutOfMLevel>(t, l, lt, sz, b[0]);
  case LevelFormat::Undef:
    llvm_unreachable("undefined level format");
  }
  llvm_unreachable("unrecognizable level format");
}
std::unique_ptr<SparseTensorLevel>
sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
                                     unsigned tid, Level lvl) {
  auto stt = getSparseTensorType(t);

  LevelType lt = stt.getLvlType(lvl);
  Value sz = stt.hasEncoding()
                 ? LvlOp::create(b, l, t, lvl).getResult()
                 : tensor::DimOp::create(b, l, t, lvl).getResult();

  SmallVector<Value, 2> buffers;
  if (lt.isWithPosLT()) {
    Value pos = ToPositionsOp::create(b, l, t, lvl);
    buffers.push_back(pos);
  }
  if (lt.isWithCrdLT()) {
    Value crd = ToCoordinatesOp::create(b, l, t, lvl);
    buffers.push_back(crd);
  }
  return makeSparseTensorLevel(lt, sz, buffers, tid, lvl);
}
std::pair<std::unique_ptr<SparseTensorLevel>, std::unique_ptr<SparseIterator>>
sparse_tensor::makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl,
                                       SparseEmitStrategy strategy) {
  auto stl = std::make_unique<BatchLevel>(tid, lvl, sz);
  auto it = std::make_unique<TrivialIterator>(*stl);
  it->setSparseEmitStrategy(strategy);
  return std::make_pair(std::move(stl), std::move(it));
}
std::unique_ptr<SparseIterator>
sparse_tensor::makeSimpleIterator(OpBuilder &b, Location l,
                                  const SparseIterationSpace &iterSpace) {
  // ...
  std::unique_ptr<SparseIterator> ret;
  if (/* ... the space is not unique ... */) {
    // Duplicated coordinates are deduplicated on the fly, iterating the last
    // level of the (collapsed) iteration space.
    ret = std::make_unique<DedupIterator>(b, l, iterSpace.getLastLvl(),
                                          /* ... space bounds ... */);
  } else {
    ret = std::make_unique<TrivialIterator>(b, l, iterSpace.getLastLvl(),
                                            /* ... space bounds ... */);
  }
  // ... (sets the emit strategy and returns)
  return ret;
}
std::unique_ptr<SparseIterator>
sparse_tensor::makeSimpleIterator(const SparseTensorLevel &stl,
                                  SparseEmitStrategy strategy) {
  std::unique_ptr<SparseIterator> ret;
  if (!isUniqueLT(stl.getLT())) {
    // Always deduplicate a non-unique level; ideally this is optimized away
    // when provably unnecessary.
    ret = std::make_unique<DedupIterator>(stl);
  } else {
    ret = std::make_unique<TrivialIterator>(stl);
  }
  ret->setSparseEmitStrategy(strategy);
  return ret;
}
std::unique_ptr<SparseIterator>
sparse_tensor::makeSlicedLevelIterator(std::unique_ptr<SparseIterator> &&sit,
                                       Value offset, Value stride, Value size,
                                       SparseEmitStrategy strategy) {
  auto ret =
      std::make_unique<FilterIterator>(std::move(sit), offset, stride, size);
  ret->setSparseEmitStrategy(strategy);
  return ret;
}
std::unique_ptr<SparseIterator>
sparse_tensor::makePaddedIterator(std::unique_ptr<SparseIterator> &&sit,
                                  Value padLow, Value padHigh,
                                  SparseEmitStrategy strategy) {
  auto ret = std::make_unique<PadIterator>(std::move(sit), padLow, padHigh);
  ret->setSparseEmitStrategy(strategy);
  return ret;
}
static const SparseIterator *tryUnwrapFilter(const SparseIterator *it) {
  auto *filter = llvm::dyn_cast_or_null<FilterIterator>(it);
  if (filter)
    return &filter->getWrappedIterator();
  return it;
}
std::unique_ptr<SparseIterator> sparse_tensor::makeNonEmptySubSectIterator(
    OpBuilder &b, Location l, const SparseIterator *parent, Value loopBound,
    std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride,
    SparseEmitStrategy strategy) {
  // ...
  std::unique_ptr<SparseIterator> it =
      std::make_unique<NonEmptySubSectIterator>(b, l, parent,
                                                std::move(delegate), size);

  if (stride != 1) {
    // Wrap the iterator with a filter when the stride is non-trivial.
    it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
                                          C_IDX(stride), /*size=*/loopBound);
  }
  it->setSparseEmitStrategy(strategy);
  return it;
}
// ...

std::unique_ptr<SparseIterator> sparse_tensor::makeTraverseSubSectIterator(
    OpBuilder &b, Location l, const SparseIterator &subSectIter,
    const SparseIterator &parent, std::unique_ptr<SparseIterator> &&wrap,
    Value loopBound, unsigned stride, SparseEmitStrategy strategy) {
  // ...
  std::unique_ptr<SparseIterator> it = std::make_unique<SubSectIterator>(
      /* ... the subsection, its parent, and the wrapped iterator ... */);

  if (stride != 1) {
    it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
                                          C_IDX(stride), /*size=*/loopBound);
  }
  it->setSparseEmitStrategy(strategy);
  return it;
}