 40     llvm_unreachable("Unsupported overhead bitwidth");
 46   if (auto intTp = dyn_cast<IntegerType>(tp))
 48   llvm_unreachable("Unknown overhead type");
 64   llvm_unreachable("Unknown OverheadType");
 89 #define CASE(ONAME, O)                                                        \
 90   case OverheadType::kU##ONAME:                                               \
 95   llvm_unreachable("Unknown OverheadType");
119   if (auto complexTp = dyn_cast<ComplexType>(elemTp)) {
120     auto complexEltTp = complexTp.getElementType();
121     if (complexEltTp.isF64())
123     if (complexEltTp.isF32())
126   llvm_unreachable("Unknown primary type");
131 #define CASE(VNAME, V)                                                        \
132   case PrimaryType::k##VNAME:                                                 \
137   llvm_unreachable("Unknown PrimaryType");
155   if (isa<IndexType>(srcTp) || isa<IndexType>(dstTp))
156     return arith::IndexCastOp::create(builder, loc, dstTp, value);
158   const auto srcIntTp = dyn_cast_or_null<IntegerType>(srcTp);
159   const bool isUnsignedCast = srcIntTp ? srcIntTp.isUnsigned() : false;
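// genScalarToTensor: adds a conversion from a scalar to the given destination
// type; only 0-ranked tensors are supported, hence the rank assertion.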
165   if (auto rtp = dyn_cast<RankedTensorType>(dstTp)) {
167     assert(rtp.getRank() == 0);
169     return tensor::FromElementsOp::create(builder, loc, rtp, elem);
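// genIndexLoad: generates a pointer/index load from the sparse storage scheme;
// non-index loads are zero-extended to i64 (when narrower) before the cast to
// index type.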
176   Value load = memref::LoadOp::create(builder, loc, mem, s);
177   if (!isa<IndexType>(load.getType())) {
179       load = arith::ExtUIOp::create(builder, loc, builder.getI64Type(), load);
181         arith::IndexCastOp::create(builder, loc, builder.getIndexType(), load);
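// getOneAttr: generates a 1-valued attribute of the given type (float, index,
// integer, or ranked tensor/vector types via a dense attribute built from the
// element type's one-value).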
187   if (isa<FloatType>(tp))
189   if (isa<IndexType>(tp))
191   if (auto intTp = dyn_cast<IntegerType>(tp))
193   if (isa<RankedTensorType, VectorType>(tp)) {
194     auto shapedTp = cast<ShapedType>(tp);
195     if (auto one = getOneAttr(builder, shapedTp.getElementType()))
198   llvm_unreachable("Unsupported attribute type");
205   if (isa<FloatType>(tp))
206     return arith::CmpFOp::create(builder, loc, arith::CmpFPredicate::UNE, v,
209     return arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, v,
211   if (isa<ComplexType>(tp))
212     return complex::NotEqualOp::create(builder, loc, v, zero);
213   llvm_unreachable("Non-numeric type");
221   if (reassociation.size() < srcShape.size()) {
225       for (unsigned i = start; i < start + map.value().size(); i++) {
226         dstDim = arith::MulIOp::create(builder, loc, dstDim, srcShape[i]);
228       dstShape.push_back(dstDim);
229       start = start + map.value().size();
231     assert(start == srcShape.size());
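// genReshapeDstShape (expand path): each source dimension is split according
// to the reassociation; a dynamic destination size in a group is derived by
// dividing the source size by the product of the group's static sizes, while
// static sizes become index constants.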
236   assert(reassociation.size() == srcShape.size());
239   for (unsigned i = 0, size = srcShape.size(); i < size; i++) {
240     const auto &map = reassociation[i];
241     auto srcDim = srcShape[i];
243     for (unsigned j = start; j < start + map.size(); j++) {
248       if (staticDstShape[j] == ShapedType::kDynamic) {
252         for (unsigned k = start; k < start + map.size(); k++) {
253           if (staticDstShape[k] != ShapedType::kDynamic) {
260             arith::DivUIOp::create(builder, loc, srcDim, productVal);
261         dstShape.push_back(dynamicSize);
264         dstShape.push_back(constantIndex(builder, loc, staticDstShape[j]));
267     start = start + map.size();
269   assert(start == staticDstShape.size());
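// reshapeCvs: reshapes coordinates during a reshaping operation by first
// linearizing the source coordinates over the grouped sizes and then
// delinearizing (div/rem) into the destination coordinates.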
277   const unsigned srcRank = srcSizes.size();
278   const unsigned dstRank = dstSizes.size();
279   assert(srcRank == srcCvs.size() && "Source rank mismatch");
280   const bool isCollapse = srcRank > dstRank;
281   const ValueRange sizes = isCollapse ? srcSizes : dstSizes;
288     for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
289       linear = arith::MulIOp::create(builder, loc, linear, sizes[j]);
296     for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
297       linear = arith::DivUIOp::create(builder, loc, linear, sizes[j]);
300             arith::MulIOp::create(builder, loc, srcCvs[j], linear);
301         val = val ? arith::AddIOp::create(builder, loc, val, mul) : mul;
303         const Value old = val;
304         val = arith::DivUIOp::create(builder, loc, val, linear);
305         assert(dstCvs.size() == j);
306         dstCvs.push_back(val);
307         val = arith::RemUIOp::create(builder, loc, old, linear);
312       assert(dstCvs.size() == i);
313       dstCvs.push_back(val);
315     start += map.value().size();
318   assert(dstCvs.size() == dstRank);
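// getFunc: returns a function reference, creating the func.func declaration in
// the module on first use; when the emitCInterface flag is set, the
// llvm.emit_c_interface attribute is attached to the created function.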
327   auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
329     OpBuilder moduleBuilder(module.getBodyRegion());
330     func = func::FuncOp::create(
331         moduleBuilder, module.getLoc(), name,
334     if (static_cast<bool>(emitCInterface))
335       func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
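// createFuncCall: creates a func.call to the function reference returned by
// getFunc() in the builder's module.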
346       getFunc(module, name, resultType, operands, emitCInterface);
347   return func::CallOp::create(builder, loc, resultType, fn, operands);
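// genAlloca / genAllocaScalar: generate uninitialized temporary buffers via
// memref.alloca, either sized for sz values of the given type or with room for
// a single value.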
359                                unsigned sz, Type tp, bool staticShape) {
362     return memref::AllocaOp::create(builder, loc, memTp);
370   return memref::AllocaOp::create(builder, loc, memTp, ValueRange{sz});
375   return memref::AllocaOp::create(builder, loc, MemRefType::get({}, tp));
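// allocaBuffer: generates a temporary buffer, initializes it with the given
// values, and returns it as a dynamically-sized memref (memref<?x...>).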
380   const unsigned sz = values.size();
383   for (unsigned i = 0; i < sz; i++) {
385     memref::StoreOp::create(builder, loc, values[i], buffer, idx);
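// allocDenseTensor: allocates (and zero-initializes) a dense buffer for the
// given ranked tensor type, collecting the dynamic dimension sizes from the
// provided sizes.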
391                                            RankedTensorType tensorTp,
393   Type elemTp = tensorTp.getElementType();
394   auto shape = tensorTp.getShape();
397   for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
398     if (shape[i] == ShapedType::kDynamic)
399       dynamicSizes.push_back(sizes[i]);
401   Value mem = memref::AllocOp::create(builder, loc, memTp, dynamicSizes);
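// deallocDenseTensor: deallocates a dense buffer.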
409   memref::DeallocOp::create(builder, loc, buffer);
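// foreachInSparseConstant: iterates over a sparse constant, sorting its
// elements by level coordinates (per the given order) and generating constant
// ops for each value and its coordinates before invoking the callback.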
437   const Dimension dimRank = stt.getDimRank();
438   const auto coordinates = attr.getIndices().getValues<IntegerAttr>();
439   const auto values = attr.getValues().getValues<Attribute>();
446   using ElementAttr = std::pair<SmallVector<IntegerAttr>, Attribute>;
450   for (size_t i = 0, nse = values.size(); i < nse; i++) {
451     elems.emplace_back();
452     elems.back().second = values[i];
453     auto &coords = elems.back().first;
454     coords.reserve(dimRank);
456       coords.push_back(coordinates[i * dimRank + d]);
460   llvm::sort(elems, [order](const ElementAttr &lhs, const ElementAttr &rhs) {
461     if (std::addressof(lhs) == std::addressof(rhs))
464     auto lhsCoords = llvm::map_to_vector(
465         lhs.first, [](IntegerAttr i) { return i.getInt(); });
466     auto rhsCoords = llvm::map_to_vector(
467         rhs.first, [](IntegerAttr i) { return i.getInt(); });
473       if (lhsLvlCrds[l] == rhsLvlCrds[l])
475       return lhsLvlCrds[l] < rhsLvlCrds[l];
477     llvm_unreachable("no equal coordinate in sparse element attr");
481   cvs.reserve(dimRank);
482   for (size_t i = 0, nse = values.size(); i < nse; i++) {
485     for (Dimension d = 0; d < dimRank; d++) {
486       auto crd = elems[i].first[d].getInt();
491     if (isa<ComplexType>(attr.getElementType())) {
492       auto valAttr = cast<ArrayAttr>(elems[i].second);
493       val = complex::ConstantOp::create(builder, loc, attr.getElementType(),
496       auto valAttr = cast<TypedAttr>(elems[i].second);
497       val = arith::ConstantOp::create(builder, loc, valAttr);
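// loadAll: loads size-many values from a rank-1 memref (whose static size, if
// any, must be large enough), optionally adding offsetVal to the value at
// offsetIdx.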
505                                size_t size, Value mem,
506                                size_t offsetIdx, Value offsetVal) {
508   const auto memTp = cast<MemRefType>(mem.getType());
509   assert(memTp.getRank() == 1);
510   const Size memSh = memTp.getDimSize(0);
511   assert(ShapedType::isDynamic(memSh) || memSh >= static_cast<Size>(size));
512   assert(offsetIdx == 0 || offsetIdx < size);
516   for (unsigned i = 0; i < size; i++) {
517     Value v = memref::LoadOp::create(builder, loc, mem,
519     if (i == offsetIdx && offsetVal)
520       v = arith::AddIOp::create(builder, loc, v, offsetVal);
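// storeAll: stores all values of vs into a rank-1 memref of sufficient size,
// optionally adding offsetVal to the value stored at offsetIdx.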
529   const size_t vsize = vs.size();
530   const auto memTp = cast<MemRefType>(mem.getType());
531   assert(memTp.getRank() == 1);
532   const Size memSh = memTp.getDimSize(0);
533   assert(ShapedType::isDynamic(memSh) || memSh >= static_cast<Size>(vsize));
534   assert(offsetIdx == 0 || offsetIdx < vsize);
538         (offsetIdx == v.index() && offsetVal)
539             ? arith::AddIOp::create(builder, loc, v.value(), offsetVal)
541     memref::StoreOp::create(builder, loc, w, mem,
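// genToMemref: exposes a tensor value as a buffer by inserting a
// bufferization.to_buffer cast to the corresponding memref type.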
548   auto tTp = llvm::cast<TensorType>(tensor.getType());
550   return cast<TypedValue<BaseMemRefType>>(
551       bufferization::ToBufferOp::create(builder, loc, mTp, tensor).getResult());
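// createOrFoldSliceOffsetOp: retrieves the slice offset for a sparse tensor
// slice, folding to a constant when the offset is statically known.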
557   assert(enc && enc.isSlice());
558   std::optional<unsigned> offset = enc.getStaticDimSliceOffset(dim);
559   if (offset.has_value())
561   return ToSliceOffsetOp::create(builder, loc, tensor, APInt(64, dim));
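// createOrFoldSliceStrideOp: same as above, but for the slice stride.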
567   assert(enc && enc.isSlice());
568   std::optional<unsigned> stride = enc.getStaticDimSliceStride(dim);
569   if (stride.has_value())
571   return ToSliceStrideOp::create(builder, loc, tensor, APInt(64, dim));
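// genReader: generates code that opens a sparse tensor reader (via the
// createCheckedSparseTensorReader runtime call) and populates the dimension
// sizes, loading them from the reader's dim-sizes buffer when the shape has
// dynamic dimensions.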
577                               Value &dimSizesBuffer) {
581   dimSizesValues.clear();
582   dimSizesValues.reserve(dimRank);
584     const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
595       createFuncCall(builder, loc, "createCheckedSparseTensorReader", opaqueTp,
601     dimSizesBuffer = dimShapesBuffer;
606         createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
611     for (Dimension d = 0; d < dimRank; d++) {
613       dimSizesValues[d] = memref::LoadOp::create(
614           builder, loc, dimSizesBuffer, constantIndex(builder, loc, d));
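// genMapBuffers: sets up the buffer parameters for a dim-to-level map. For an
// identity mapping it reuses the dimension sizes and a single iota buffer for
// both dim2lvl and lvl2dim; otherwise it decodes the dimToLvl/lvlToDim affine
// expressions (plain dims, floordiv/mod, and mul-plus-dim forms) into level
// sizes and the encoded dim2lvl/lvl2dim buffers.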
624                                    Value &dim2lvlBuffer,
625                                    Value &lvl2dimBuffer) {
628   lvlSizesValues.clear();
629   lvlSizesValues.reserve(lvlRank);
634     assert(dimRank == lvlRank);
636     iotaValues.reserve(lvlRank);
637     for (Level l = 0; l < lvlRank; l++) {
639       lvlSizesValues.push_back(dimSizesValues[l]);
641     dim2lvlBuffer = lvl2dimBuffer = allocaBuffer(builder, loc, iotaValues);
642     return dimSizesBuffer;
652   assert(lvlRank == dimToLvl.getNumResults());
653   for (Level l = 0; l < lvlRank; l++) {
660     uint64_t cf = 0, cm = 0;
663       d = cast<AffineDimExpr>(exp).getPosition();
667       auto floor = cast<AffineBinaryOpExpr>(exp);
668       d = cast<AffineDimExpr>(floor.getLHS()).getPosition();
669       cf = cast<AffineConstantExpr>(floor.getRHS()).getValue();
673       auto mod = cast<AffineBinaryOpExpr>(exp);
674       d = cast<AffineDimExpr>(mod.getLHS()).getPosition();
675       cm = cast<AffineConstantExpr>(mod.getRHS()).getValue();
679       llvm::report_fatal_error("unsupported dim2lvl in sparse tensor type");
688       lvlSz = dimSizesValues[d];
690       lvlSz = arith::DivUIOp::create(builder, loc, lvlSz,
695     lvlSizesValues.push_back(lvlSz);
698   assert(dimRank == lvlToDim.getNumResults());
699   for (Dimension d = 0; d < dimRank; d++) {
708       l = cast<AffineDimExpr>(exp).getPosition();
713       auto add = cast<AffineBinaryOpExpr>(exp);
715       auto mul = cast<AffineBinaryOpExpr>(add.getLHS());
716       ll = cast<AffineDimExpr>(mul.getLHS()).getPosition();
717       c = cast<AffineConstantExpr>(mul.getRHS()).getValue();
718       l = cast<AffineDimExpr>(add.getRHS()).getPosition();
722       llvm::report_fatal_error("unsupported lvl2dim in sparse tensor type");
727   dim2lvlBuffer = allocaBuffer(builder, loc, dim2lvlValues);
728   lvl2dimBuffer = allocaBuffer(builder, loc, lvl2dimValues);