35 assert(std::addressof(l) == std::addressof(r) || l != r);
36 return l.first < r.first;
44 unsigned numTensors, unsigned numLoops, unsigned maxRank)
45 : linalgOp(linop), sparseOptions(opts),
46 latticeMerger(numTensors, numLoops, maxRank), loopEmitter(),
47 sparseOut(nullptr), outerParNest(-1u), insChain(), expValues(),
48 expFilled(), expAdded(), expCount(), redVal(), redExp(detail::kInvalidId),
49 redCustom(detail::kInvalidId), redValidLexInsert() {}
53 std::optional<ExprId> optExp = latticeMerger.buildTensorExpFromLinalg(op());
62 assert(insChain == nullptr && "must only start emitting once");
64 insChain = sparseOut->get();
65 latticeMerger.setHasSparseOut(true);
77 for (OpOperand &t : linalgOp->getOpOperands()) {
78 tensors.push_back(t.get());
80 const Level lvlRank = linalgOp.getMatchingIndexingMap(&t).getNumResults();
83 assert(!enc || lvlRank == enc.getLvlRank());
84 for (Level lvl = 0; lvl < lvlRank; lvl++)
87 loopEmitter.initialize(
89 StringAttr::get(linalgOp.getContext(),
90 linalg::GenericOp::getOperationName()),
107 params.push_back(redVal);
109 params.push_back(redValidLexInsert);
114 params.push_back(expCount);
115 if (insChain != nullptr)
116 params.push_back(insChain);
117 auto r = callback(params);
126 if (insChain != nullptr)
140 for (utils::IteratorType it : linalgOp.getIteratorTypesArray()) {
141 if (it == utils::IteratorType::reduction) {
142 if (latticeMerger.hasNegateOnOut(exp))
158 if (latticeMerger.isSingleCondition(tensor, exp))
168 const auto iteratorTypes = linalgOp.getIteratorTypesArray();
169 for (unsigned i = 0, e = getLoopNum(); i < e; i++) {
177 assert(static_cast<int64_t>(outerParNest) >=
178 linalgOp.getRank(linalgOp.getDpsInitOperand(0)) - 1);
187 return loopEmitter.getLoopIV(i);
195 assert(sparseOut != nullptr && insChain != nullptr);
200 return sparseOut == o && outerParNest == static_cast<LoopId>(rank - 1) &&
206 assert(sparseOut != nullptr && expValues == nullptr);
214 assert(sparseOut != nullptr && expValues != nullptr);
219 assert(sparseOut != nullptr && expValues != nullptr);
220 expValues = expFilled = expAdded = expCount = Value();
231 latticeMerger.setExprValue(exp, val);
237 latticeMerger.clearExprValue(redExp);
238 latticeMerger.setExprValue(redExp, val);
245 latticeMerger.clearExprValue(redExp);
252 redValidLexInsert = val;
256 assert(redValidLexInsert && isReduc() && val);
257 redValidLexInsert = val;
262 redValidLexInsert = Value();
272 return dyn_cast<sparse_tensor::ReduceOp>(exp(redCustom).op).getIdentity();
static bool isMaterializing(Value val)
Returns true if tensor materializes uninitialized into the computation.
static void sortDependentLoops(std::vector< LoopCoeffPair > &target)
Sorts the dependent loops such that it is ordered in the same sequence in which loops will be generat...
static bool isMaterializing(OpOperand *op, bool isZero)
This class represents an operand of an operation.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
void startReduc(ExprId exp, Value val)
void updateValidLexInsert(Value val)
std::optional< Operation * > genLoopBoundary(function_ref< std::optional< Operation * >(MutableArrayRef< Value > parameters)> callback)
Generates loop boundary statements (entering/exiting loops).
bool isAdmissibleTensorExp(ExprId e)
Whether the tensor expression is admissible for codegen.
bool atExpandLevel(OpOperand *o, unsigned rank, LoopId n) const
bool isCustomReduc() const
CodegenEnv(linalg::GenericOp linop, SparsificationOptions opts, unsigned numTensors, unsigned numLoops, unsigned maxRank)
Constructs a code generation environment which can be passed around during sparsification for bookkee...
constexpr TensorId makeTensorId(unsigned t) const
void startExpand(Value values, Value filled, Value added, Value count)
unsigned getLoopNum() const
void updateInsertionChain(Value chain)
void startCustomReduc(ExprId exp)
linalg::GenericOp op() const
Value getLoopVar(LoopId i) const
Returns the induction-variable for the given loop.
LogicalResult initTensorExp()
void startEmit(SparseEmitStrategy emitStrategy)
void updateExpandCount(Value count)
void updateReduc(Value val)
void startValidLexInsert(Value val)
Value getCustomRedId() const
const TensorExp & exp(ExprId e) const
bool isValidLexInsert() const
std::vector< LoopCoeffPair > & getDependentLoops(TensorId t, Level lvl)
Returns the list of loop indices which appear in the non-trivial index expression on t_l,...
bool isAllDense() const
Returns true for tensors where every level is dense.
bool isReductionIterator(utils::IteratorType iteratorType)
static constexpr unsigned kInvalidId
A constant serving as the canonically invalid identifier, regardless of the identifier type.
std::pair< LoopId, unsigned > LoopCoeffPair
A pair of loop id and its coefficients.
unsigned ExprId
TensorExp identifiers.
unsigned TensorId
Tensor identifiers, chosen to be the BlockArgument::getArgNumber of the value passed to Merger::build...
uint64_t Level
The type of level identifiers and level-ranks.
SparseTensorEncodingAttr getSparseTensorEncoding(Type type)
Convenience method to get a sparse encoding attribute from a type.
unsigned LoopId
Loop identifiers.
SparseTensorType getSparseTensorType(Value val)
Convenience methods to obtain a SparseTensorType from a Value.
Include the generated interface declarations.
SparseEmitStrategy
Defines a scope for reinterpret map pass.
llvm::function_ref< Fn > function_ref
Options for the Sparsification pass.