#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_

namespace sparse_tensor {
  explicit LoopEmitter(ValueRange tensors, StringAttr loopTag = nullptr,
                       bool hasOutput = false, bool isSparseOut = false,
                       unsigned numLoops = 0, DependentLvlGetter getter = nullptr,
                       SparseEmitStrategy emitStrategy =
                           SparseEmitStrategy::kFunctional);

  /// Takes an array of input tensors, which the generated loops will
  /// iterate over.
  void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
                  bool hasOutput = false, bool isSparseOut = false,
                  unsigned numLoops = 0, DependentLvlGetter getter = nullptr,
                  SparseEmitStrategy emitStrategy =
                      SparseEmitStrategy::kFunctional);
  /// Emits a co-iteration loop over a set of tensors.
  Operation *enterCoIterationOverTensorsAtLvls(
      OpBuilder &builder, Location loc, ArrayRef<TensorLevel> tidLvls,
      unsigned numCases, MutableArrayRef<Value> reduc = {},
      bool isParallel = false, bool needsUniv = false);
  /// Gets the range of values for all induction variables.
  auto getLoopIVsRange() const {
    return llvm::map_range(loopStack, [](const LoopInfo &li) { return li.iv; });
  }
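Aside (not part of the header): a minimal, self-contained sketch of the llvm::map_range projection pattern used by getLoopIVsRange above. The Info struct and collectIVs helper are hypothetical stand-ins for LoopInfo and the loop stack. Because map_range builds a lazy view, the projection itself performs no copies; the caller materializes the values only when needed.

// Hypothetical sketch of the llvm::map_range projection pattern.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

struct Info {
  int iv; // stands in for LoopInfo::iv
};

llvm::SmallVector<int> collectIVs(llvm::ArrayRef<Info> stack) {
  // map_range returns a lazy view; materialize it into a vector here.
  auto ivs = llvm::map_range(stack, [](const Info &i) { return i.iv; });
  return llvm::SmallVector<int>(ivs.begin(), ivs.end());
}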
  /// De-compresses a TensorLevel back to a pair of TensorId and Level.
  std::pair<TensorId, Level> unpackTensorLevel(TensorLevel tidLvl) const {
    unsigned nt = getNumTensors();
    return std::make_pair(tidLvl % nt, tidLvl / nt);
  }
  /// Converts a range of TensorLevel to a range of std::pair<TensorId, Level>.
  template <class ContainerTy>
  auto unpackTensorLevelRange(ContainerTy &&c) const {
    using EltTy = decltype(*c.begin());
    static_assert(std::is_same_v<llvm::remove_cvref_t<EltTy>, TensorLevel>,
                  "Must be unpacking a TensorLevel range");
    return llvm::map_range(std::forward<ContainerTy>(c),
                           [this](EltTy tl) { return unpackTensorLevel(tl); });
  }
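Aside (not part of the header): a self-contained sketch of the TensorLevel encoding implied by makeTensorLevel and unpackTensorLevel. The packTidLvl/unpackTidLvl names are hypothetical, and the packing formula lvl * numTensors + tid is an assumption inferred from the modulo/division above.

// Hypothetical sketch of the TensorLevel pairing scheme: a (TensorId, Level)
// pair is flattened into one integer and recovered with % and /.
#include <cassert>
#include <cstdint>
#include <utility>

using TensorId = unsigned;
using Level = uint64_t;
using TensorLevel = unsigned;

// Assumed inverse of the unpacking shown above.
inline TensorLevel packTidLvl(TensorId tid, Level lvl, unsigned numTensors) {
  return static_cast<TensorLevel>(lvl * numTensors + tid);
}

// Mirrors unpackTensorLevel: tid = tidLvl % numTensors, lvl = tidLvl / numTensors.
inline std::pair<TensorId, Level> unpackTidLvl(TensorLevel tidLvl,
                                               unsigned numTensors) {
  return {tidLvl % numTensors, tidLvl / numTensors};
}

int main() {
  const unsigned numTensors = 3; // e.g., two inputs and one output
  auto [tid, lvl] = unpackTidLvl(packTidLvl(2, 4, numTensors), numTensors);
  assert(tid == 2 && lvl == 4);
  return 0;
}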
  /// Getters.
  SmallVector<Value> getValPosits(TensorId tid) const {
    if (emitStrategy == SparseEmitStrategy::kSparseIterator)
      return {spIterVals[tid].back()};
    SmallVector<Value> batchCrds = iters[tid].back().back()->getBatchCrds();
    Value lastLvlPos = iters[tid].back().back()->getCurPosition().front();
    batchCrds.push_back(lastLvlPos);
    return batchCrds;
  }
  Value getCoord(TensorId tid, Level lvl) const {
    return getCurIterator(tid, lvl).getCrd();
  }
  constexpr static llvm::StringLiteral getLoopEmitterLoopAttrName() {
    return llvm::StringLiteral("Emitted from");
  }
  struct LoopInfo final {
    LoopInfo(ArrayRef<TensorLevel> tidLvls, Operation *loop, Block *userBlock,
             Value iv, StringAttr loopTag)
        : tidLvls(tidLvls), loop(loop), userCodeBlock(userBlock), iv(iv) {
      // Attach a special tag to loops generated by the loop emitter.
      if (loopTag)
        loop->setAttr(LoopEmitter::getLoopEmitterLoopAttrName(), loopTag);
    }
    const llvm::SmallVector<TensorLevel> tidLvls; // tensor levels driving the loop
    Operation *loop;                              // the loop operation
    Block *const userCodeBlock;                   // block holding users' generated code
    const Value iv;                               // the induction variable of the loop
  };
  void categorizeIterators(ArrayRef<TensorLevel> tidLvls,
                           SmallVectorImpl<SparseIterator *> &raIters,
                           SmallVectorImpl<SparseIterator *> &spIters);
  using LoopBodyBuilder = function_ref<void(OpBuilder &, Location, Value,
                                            MutableArrayRef<Value>)>;
  bool shouldIteratedByForLoop(ArrayRef<SparseIterator *> spIters);
  Value genSparseCrd(OpBuilder &builder, Location loc, TensorId tid, Level lvl);
  bool isOutputTensor(TensorId tid) const {
    return hasOutput && tid == getOutTensorId();
  }

  bool isSparseOutput(TensorId tid) const {
    return isOutputTensor(tid) && isSparseOut;
  }
  bool isValidLevel(TensorId tid, Level lvl) const {
    return tid < lvls.size() && lvl < lvls[tid].size();
  }
  void prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc,
                                  TensorId tid, Level lvl);
  std::pair<Operation *, Value>
  emitForLoopOverTensorAtLvl(OpBuilder &builder, Location loc,
                             SparseIterator &iter, MutableArrayRef<Value> reduc,
                             bool isParallel);
  std::pair<Operation *, Value>
  emitWhileLoopOverTensorsAtLvls(OpBuilder &builder, Location loc,
                                 ArrayRef<SparseIterator *> iters,
                                 MutableArrayRef<Value> reduc, bool needsUniv);
  void exitForLoop(RewriterBase &rewriter, Location loc,
                   MutableArrayRef<Value> reduc);

  void exitWhileLoop(OpBuilder &builder, Location loc,
                     MutableArrayRef<Value> reduc);
  void initSubSectIterator(OpBuilder &builder, Location loc);
  unsigned redDepOnLevel(TensorId tid, Level lvl) const {
    return levelReducedDep[tid][lvl];
  }
  SparseIterator &getCurIterator(TensorId tid, Level lvl) const {
    if (dependentLvlMap[tid][lvl].empty())
      return *iters[tid][lvl].back();

    assert(redDepOnLevel(tid, lvl) >= 1);
    return *iters[tid][lvl][redDepOnLevel(tid, lvl) - 1];
  }
  std::unique_ptr<SparseIterator>
  makeLevelIterator(OpBuilder &builder, Location loc, TensorId tid, Level l);
  std::vector<Value> tensors;
  std::vector<Value> loopHighs;
  std::vector<std::vector<std::unique_ptr<SparseTensorLevel>>> lvls;
  std::vector<std::vector<std::vector<std::unique_ptr<SparseIterator>>>> iters;
  std::vector<Value> valBuffer;
  std::vector<std::vector<std::vector<std::pair<LoopId, unsigned>>>>
      dependentLvlMap;
  std::vector<std::vector<std::vector<std::pair<Value, unsigned>>>> sliceMeta;
  std::vector<std::vector<unsigned>> levelReducedDep;
  std::vector<LoopInfo> loopStack; // the stack of loops that are currently alive
  std::vector<std::pair<Value, std::vector<TensorLevel>>> loopSeqStack;
  std::vector<std::vector<Value>> spIterVals; // SSA values emitted for the sparse-iterator strategy
  std::pair<Operation *, Value>
  genCoIteration(OpBuilder &builder, Location loc,
                 ArrayRef<SparseIterator *> iters, MutableArrayRef<Value> reduc,
                 Value uniIdx, bool userReducFirst = false);

LoopEmitter member summaries:

void exitCurrentLoop(RewriterBase &rewriter, Location loc, MutableArrayRef<Value> reduc = {})
    Generates code to exit the current loop (e.g., generates yields, forwards loop induction variables, ...).

constexpr static llvm::StringLiteral getLoopEmitterLoopAttrName()
    Returns the attribute name used to tag loops generated by the loop emitter.

void locateLvlAtAffineAddress(OpBuilder &builder, Location loc, TensorLevel tidLvl, AffineExpr lvlExpr)
    Emits the address for a dense level based on the value evaluated by the provided affine expression.

const std::vector<Value> &getValBuffer() const
    Returns the value buffers for all tensors.

void enterNewLoopSeq(OpBuilder &builder, Location loc, ArrayRef<TensorLevel> tidLvls)
    Enters a new loop sequence; the loops within the same sequence start from the break points of the previous ...

Value genAffine(OpBuilder &builder, Location loc, AffineExpr a)
    Generates code to compute an affine expression whose variables are LoopIds (i.e., a ...).

Region *enterCurrentCoIterationCase(OpBuilder &builder, Location loc, I64BitSet caseBit, unsigned caseIdx, MutableArrayRef<Value> reduc)

Operation *enterCoIterationOverTensorsAtLvls(OpBuilder &builder, Location loc, ArrayRef<TensorLevel> tidLvls, unsigned numCases, MutableArrayRef<Value> reduc = {}, bool isParallel = false, bool needsUniv = false)
    Emits a co-iteration loop over a set of tensors.

TensorId getOutTensorId() const
    Gets the TensorId for the output tensor.

TensorLevel makeTensorLevel(TensorId t, Level l) const
    Compresses a TensorId and Level into a TensorLevel.

unsigned getNumManifestTensors() const
    Gets the total number of manifest tensors (excluding the synthetic tensor).

void initialize(ValueRange tensors, StringAttr loopTag = nullptr, bool hasOutput = false, bool isSparseOut = false, unsigned numLoops = 0, DependentLvlGetter getter = nullptr, SparseEmitStrategy emitStrategy = SparseEmitStrategy::kFunctional)
    Takes an array of input tensors, which the generated loops will iterate over.

Value getLoopIV(LoopId n) const
    Gets the loop induction variable for the given loop.

std::pair<TensorId, Level> unpackTensorLevel(TensorLevel tidLvl) const
    De-compresses a TensorLevel back to a pair of TensorId and Level.

auto unpackTensorLevelRange(ContainerTy &&c) const
    Converts a range of TensorLevel to a range of std::pair<TensorId, Level>.

SmallVector<Value> getValPosits(TensorId tid) const
    Getters; returns the current position(s) into the values buffer of the given tensor.

unsigned getNumTensors() const
    Gets the total number of tensors that the loop emitter is operating on.

SmallVector<Value> getLoopIVs() const
    Returns the loop induction variables for all loops in the current loop-stack.

auto getLoopIVsRange() const
    Gets the range of values for all induction variables.

void initializeLoopEmit(OpBuilder &builder, Location loc, OutputUpdater updater = nullptr, SynTensorBoundSetter synSetter = nullptr)
    Starts a loop-emitting session by generating all the buffers needed for iterating over the tensors.

LoopId getCurrentDepth() const
    Gets the current depth of the loop-stack.

void exitCurrentLoopSeq(OpBuilder &builder, Location loc)
    Exits the current loop sequence; this resets the universal index to 0.

TensorId getSynTensorId() const
    Gets the TensorId for the synthetic tensor.

Value getCoord(TensorId tid, Level lvl) const
    Returns the coordinate of the current iteration for the given tensor level.

std::pair<Operation *, Value> genCoIteration(OpBuilder &builder, Location loc, ArrayRef<SparseIterator *> iters, MutableArrayRef<Value> reduc, Value uniIdx, bool userReducFirst = false)
Related type aliases:

uint64_t Level
    The type of level identifiers and level-ranks.

unsigned LoopId
    Loop identifiers.

unsigned TensorId
    Tensor identifiers, chosen to be the BlockArgument::getArgNumber of the value passed to Merger::build...
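Finally, a rough, hypothetical usage sketch that ties the entry points summarized above together. It is not taken from the sparsifier: the emitSingleLoop name, the numCases value, and the omission of reduction values, output updaters, and loop-body emission are simplifying assumptions, and it presumes the declarations from this header are in scope.

// Hypothetical driver sketch (not from the sparsifier): emit one loop over a
// single tensor level using the public LoopEmitter interface.
using namespace mlir;
using namespace mlir::sparse_tensor;

static void emitSingleLoop(RewriterBase &rewriter, Location loc,
                           ValueRange tensors, TensorId tid, Level lvl) {
  // One loop in total; no output tensor handling in this sketch.
  LoopEmitter emitter(tensors, /*loopTag=*/nullptr, /*hasOutput=*/false,
                      /*isSparseOut=*/false, /*numLoops=*/1);
  // Generate the buffers (positions, coordinates, values) for all tensors.
  emitter.initializeLoopEmit(rewriter, loc);

  // Group the tensor level(s) co-iterated by the upcoming loop.
  TensorLevel tl = emitter.makeTensorLevel(tid, lvl);
  emitter.enterNewLoopSeq(rewriter, loc, {tl});
  // numCases = 1 is an assumption; it only matters when co-iteration cases are used.
  emitter.enterCoIterationOverTensorsAtLvls(rewriter, loc, {tl},
                                            /*numCases=*/1);

  // ... emit the loop-body IR here, e.g. using emitter.getCoord(tid, lvl) ...

  // Unwind: close the loop, then the loop sequence.
  emitter.exitCurrentLoop(rewriter, loc);
  emitter.exitCurrentLoopSeq(rewriter, loc);
}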