// Fragments from the overhead-type helpers (overheadTypeEncoding,
// getOverheadType, overheadTypeFunctionSuffix):
  llvm_unreachable("Unsupported overhead bitwidth");
  // ...
  if (auto intTp = dyn_cast<IntegerType>(tp))
    /* ... */;
  llvm_unreachable("Unknown overhead type");
  // ...
  llvm_unreachable("Unknown OverheadType");
  // ...
#define CASE(ONAME, O)                                                         \
  case OverheadType::kU##ONAME:                                                \
    /* ... */
  llvm_unreachable("Unknown OverheadType");
// Fragments from primaryTypeEncoding and primaryTypeFunctionSuffix:
  if (auto complexTp = dyn_cast<ComplexType>(elemTp)) {
    auto complexEltTp = complexTp.getElementType();
    if (complexEltTp.isF64())
      /* ... */;
    if (complexEltTp.isF32())
      /* ... */;
  }
  llvm_unreachable("Unknown primary type");
  // ...
#define CASE(VNAME, V)                                                         \
  case PrimaryType::k##VNAME:                                                  \
    /* ... */
  llvm_unreachable("Unknown PrimaryType");
// Fragment from genCast:
  if (isa<IndexType>(srcTp) || isa<IndexType>(dstTp))
    return builder.create<arith::IndexCastOp>(loc, dstTp, value);
  // ...
  const auto srcIntTp = dyn_cast_or_null<IntegerType>(srcTp);
  const bool isUnsignedCast = srcIntTp ? srcIntTp.isUnsigned() : false;
// Fragment from genScalarToTensor (scalars only convert to 0-ranked tensors):
  if (auto rtp = dyn_cast<RankedTensorType>(dstTp)) {
    // ...
    assert(rtp.getRank() == 0);
    // ...
    return builder.create<tensor::FromElementsOp>(loc, rtp, elem);
  }
// Fragment from genIndexLoad:
  Value load = builder.create<memref::LoadOp>(loc, mem, s);
  if (!isa<IndexType>(load.getType())) {
    // ...
  }
// Fragments from getOneAttr:
  if (isa<FloatType>(tp))
    /* ... */;
  if (isa<IndexType>(tp))
    /* ... */;
  if (auto intTp = dyn_cast<IntegerType>(tp))
    /* ... */;
  if (isa<RankedTensorType, VectorType>(tp)) {
    auto shapedTp = cast<ShapedType>(tp);
    if (auto one = getOneAttr(builder, shapedTp.getElementType()))
      /* ... */;
  }
  llvm_unreachable("Unsupported attribute type");
// Fragment from genIsNonzero (`zero` is a zero constant of v's type):
  if (isa<FloatType>(tp))
    return builder.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UNE, v,
                                         zero);
  // ...
  return builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne, v,
                                       zero);
  if (dyn_cast<ComplexType>(tp))
    return builder.create<complex::NotEqualOp>(loc, v, zero);
  llvm_unreachable("Non-numeric type");
// Fragment from genReshapeDstShape (collapsing source dimensions: each
// destination size is the product of its reassociated source sizes):
  if (reassociation.size() < srcShape.size()) {
    // ...
    for (unsigned i = start; i < start + map.value().size(); i++) {
      dstDim = builder.create<arith::MulIOp>(loc, dstDim, srcShape[i]);
    }
    dstShape.push_back(dstDim);
    start = start + map.value().size();
    // ...
    assert(start == srcShape.size());
    // ...
  }
// Fragment from genReshapeDstShape (expanding source dimensions: a single
// dynamic destination size is obtained by dividing the source size by the
// product of the statically known destination sizes in the group):
  assert(reassociation.size() == srcShape.size());
  // ...
  for (unsigned i = 0, size = srcShape.size(); i < size; i++) {
    const auto &map = reassociation[i];
    auto srcDim = srcShape[i];
    // ...
    for (unsigned j = start; j < start + map.size(); j++) {
      if (staticDstShape[j] == ShapedType::kDynamic) {
        // ...
        for (unsigned k = start; k < start + map.size(); k++) {
          if (staticDstShape[k] != ShapedType::kDynamic) {
            // ...
          }
        }
        // ...
        Value dynamicSize =
            builder.create<arith::DivUIOp>(loc, srcDim, productVal);
        dstShape.push_back(dynamicSize);
      } else {
        dstShape.push_back(constantIndex(builder, loc, staticDstShape[j]));
      }
    }
    start = start + map.size();
  }
  assert(start == staticDstShape.size());
// Fragment from reshapeCvs (translates source coordinates into destination
// coordinates across a collapse or expand reshape):
  const unsigned srcRank = srcSizes.size();
  const unsigned dstRank = dstSizes.size();
  assert(srcRank == srcCvs.size() && "Source rank mismatch");
  const bool isCollapse = srcRank > dstRank;
  const ValueRange sizes = isCollapse ? srcSizes : dstSizes;
  // ...
    // Compute the linear size of the current reassociation group.
    for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
      linear = builder.create<arith::MulIOp>(loc, linear, sizes[j]);
    }
    // ...
    for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
      linear = builder.create<arith::DivUIOp>(loc, linear, sizes[j]);
      // Collapse path: linearize the source coordinates.
      const Value mul = builder.create<arith::MulIOp>(loc, srcCvs[j], linear);
      val = val ? builder.create<arith::AddIOp>(loc, val, mul) : mul;
      // Expand path: delinearize into the destination coordinates.
      const Value old = val;
      val = builder.create<arith::DivUIOp>(loc, val, linear);
      assert(dstCvs.size() == j);
      dstCvs.push_back(val);
      val = builder.create<arith::RemUIOp>(loc, old, linear);
    }
    // Collapse path: emit the single linearized coordinate.
    assert(dstCvs.size() == i);
    dstCvs.push_back(val);
    // ...
    start += map.value().size();
  // ...
  assert(dstCvs.size() == dstRank);
// Fragments from getFunc and createFuncCall:
  auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
  // ...
    OpBuilder moduleBuilder(module.getBodyRegion());
    func = moduleBuilder.create<func::FuncOp>(module.getLoc(), name,
                                              /* ... */);
    // ...
    if (static_cast<bool>(emitCInterface))
      func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
                    /* ... */);
  // ...
  FlatSymbolRefAttr fn =
      getFunc(module, name, resultType, operands, emitCInterface);
  return builder.create<func::CallOp>(loc, resultType, fn, operands);
// Fragment from a genAlloca overload (stack-allocates sz elements of tp):
Value genAlloca(OpBuilder &builder, Location loc, unsigned sz, Type tp,
                bool staticShape) {
  // ...
  return builder.create<memref::AllocaOp>(loc, memTp);
// Fragment from allocaBuffer:
  const unsigned sz = values.size();
  // ...
  for (unsigned i = 0; i < sz; i++) {
    // ...
    builder.create<memref::StoreOp>(loc, values[i], buffer, idx);
  }
// Fragment from allocDenseTensor (zero-initialized dense buffer allocation):
Value allocDenseTensor(OpBuilder &builder, Location loc,
                       RankedTensorType tensorTp, ValueRange sizes) {
  Type elemTp = tensorTp.getElementType();
  auto shape = tensorTp.getShape();
  // ...
  for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
    if (shape[i] == ShapedType::kDynamic)
      dynamicSizes.push_back(sizes[i]);
  }
  Value mem = builder.create<memref::AllocOp>(loc, memTp, dynamicSizes);
// Fragment from deallocDenseTensor:
  builder.create<memref::DeallocOp>(loc, buffer);
// Fragments from foreachInSparseConstant:
  const Dimension dimRank = stt.getDimRank();
  const auto coordinates = attr.getIndices().getValues<IntegerAttr>();
  const auto values = attr.getValues().getValues<Attribute>();
  // ...
  using ElementAttr = std::pair<SmallVector<IntegerAttr>, Attribute>;
  // ...
  // Collect the (coordinates, value) pairs of the sparse constant.
  for (size_t i = 0, nse = values.size(); i < nse; i++) {
    elems.emplace_back();
    elems.back().second = values[i];
    auto &coords = elems.back().first;
    coords.reserve(dimRank);
    // ... (for each dimension d)
      coords.push_back(coordinates[i * dimRank + d]);
  }
  // Sort the elements into the level-order imposed by `order`.
  std::sort(elems.begin(), elems.end(),
            [order](const ElementAttr &lhs, const ElementAttr &rhs) {
              if (std::addressof(lhs) == std::addressof(rhs))
                return false;
              auto lhsCoords = llvm::map_to_vector(
                  lhs.first, [](IntegerAttr i) { return i.getInt(); });
              auto rhsCoords = llvm::map_to_vector(
                  rhs.first, [](IntegerAttr i) { return i.getInt(); });
              // ... (compare level coordinates lexicographically)
                if (lhsLvlCrds[l] == rhsLvlCrds[l])
                  continue;
                return lhsLvlCrds[l] < rhsLvlCrds[l];
              // ...
              llvm_unreachable("no equal coordinate in sparse element attr");
            });
  // Generate the constant coordinates and value for each element.
  cvs.reserve(dimRank);
  for (size_t i = 0, nse = values.size(); i < nse; i++) {
    // ...
    for (Dimension d = 0; d < dimRank; d++) {
      auto crd = elems[i].first[d].getInt();
      // ...
    }
    // ...
    if (isa<ComplexType>(attr.getElementType())) {
      auto valAttr = cast<ArrayAttr>(elems[i].second);
      val = builder.create<complex::ConstantOp>(loc, attr.getElementType(),
                                                valAttr);
    } else {
      auto valAttr = cast<TypedAttr>(elems[i].second);
      val = builder.create<arith::ConstantOp>(loc, valAttr);
    }
    // ...
  }
// Fragment from loadAll:
SmallVector<Value> loadAll(OpBuilder &builder, Location loc, size_t size,
                           Value mem, size_t offsetIdx, Value offsetVal) {
  const auto memTp = cast<MemRefType>(mem.getType());
  assert(memTp.getRank() == 1);
  const Size memSh = memTp.getDimSize(0);
  assert(ShapedType::isDynamic(memSh) || memSh >= static_cast<Size>(size));
  assert(offsetIdx == 0 || offsetIdx < size);
  // ...
  for (unsigned i = 0; i < size; i++) {
    // ...
    if (i == offsetIdx && offsetVal)
      v = builder.create<arith::AddIOp>(loc, v, offsetVal);
    // ...
  }
// Fragment from storeAll:
  const size_t vsize = vs.size();
  const auto memTp = cast<MemRefType>(mem.getType());
  assert(memTp.getRank() == 1);
  const Size memSh = memTp.getDimSize(0);
  assert(ShapedType::isDynamic(memSh) || memSh >= static_cast<Size>(vsize));
  assert(offsetIdx == 0 || offsetIdx < vsize);
  // ... (for each enumerated value v in vs)
    const Value w =
        (offsetIdx == v.index() && offsetVal)
            ? builder.create<arith::AddIOp>(loc, v.value(), offsetVal)
            : v.value();
    builder.create<memref::StoreOp>(loc, w, mem,
                                    /* ... */);
// Fragment from genToMemref:
  auto tTp = llvm::cast<TensorType>(tensor.getType());
  // ...
  return builder.create<bufferization::ToMemrefOp>(loc, mTp, tensor)
// Fragment from createOrFoldSliceOffsetOp:
  assert(enc && enc.isSlice());
  std::optional<unsigned> offset = enc.getStaticDimSliceOffset(dim);
  if (offset.has_value())
    return constantIndex(builder, loc, *offset);
  return builder.create<ToSliceOffsetOp>(loc, tensor, APInt(64, dim));
// Fragment from createOrFoldSliceStrideOp:
  assert(enc && enc.isSlice());
  std::optional<unsigned> stride = enc.getStaticDimSliceStride(dim);
  if (stride.has_value())
    return constantIndex(builder, loc, *stride);
  return builder.create<ToSliceStrideOp>(loc, tensor, APInt(64, dim));
// Fragments from genReader (opens a checked sparse tensor reader and queries
// the dimension sizes):
Value genReader(OpBuilder &builder, Location loc, SparseTensorType stt,
                Value tensor, SmallVectorImpl<Value> &dimSizesValues,
                Value &dimSizesBuffer) {
  // ...
  dimSizesValues.clear();
  dimSizesValues.reserve(dimRank);
  // ...
  const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
  // ...
  Value reader =
      createFuncCall(builder, loc, "createCheckedSparseTensorReader", opaqueTp,
                     {tensor, dimShapesBuffer, valTp}, EmitCInterface::On)
          .getResult(0);
  // ...
    dimSizesBuffer = dimShapesBuffer;
  // ...
    dimSizesBuffer =
        createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
                       reader, EmitCInterface::On)
            .getResult(0);
  // ...
  for (Dimension d = 0; d < dimRank; d++) {
    // ...
    dimSizesValues[d] = builder.create<memref::LoadOp>(/* ... */);
  }
// Fragments from genMapBuffers (sets up the dim2lvl/lvl2dim buffers and the
// level sizes):
Value genMapBuffers(/* ... */, Value &dim2lvlBuffer, Value &lvl2dimBuffer) {
  // ...
  lvlSizesValues.clear();
  lvlSizesValues.reserve(lvlRank);
  // Identity mapping: dim2lvl and lvl2dim share one iota buffer.
  // ...
    assert(dimRank == lvlRank);
    // ...
    iotaValues.reserve(lvlRank);
    for (Level l = 0; l < lvlRank; l++) {
      // ...
      lvlSizesValues.push_back(dimSizesValues[l]);
    }
    dim2lvlBuffer = lvl2dimBuffer = allocaBuffer(builder, loc, iotaValues);
    return dimSizesBuffer;
  // ...
  // Non-identity mapping: decode each dimToLvl result expression.
  assert(lvlRank == dimToLvl.getNumResults());
  for (Level l = 0; l < lvlRank; l++) {
    // ...
    uint64_t cf = 0, cm = 0;
    // AffineExprKind::DimId:
      d = cast<AffineDimExpr>(exp).getPosition();
    // AffineExprKind::FloorDiv:
      auto floor = cast<AffineBinaryOpExpr>(exp);
      d = cast<AffineDimExpr>(floor.getLHS()).getPosition();
      cf = cast<AffineConstantExpr>(floor.getRHS()).getValue();
    // AffineExprKind::Mod:
      auto mod = cast<AffineBinaryOpExpr>(exp);
      d = cast<AffineDimExpr>(mod.getLHS()).getPosition();
      cm = cast<AffineConstantExpr>(mod.getRHS()).getValue();
    // default:
      llvm::report_fatal_error("unsupported dim2lvl in sparse tensor type");
    // ...
      lvlSz = dimSizesValues[d];
      // ...
        lvlSz = builder.create<arith::DivUIOp>(loc, lvlSz,
                                               /* ... */);
    // ...
    lvlSizesValues.push_back(lvlSz);
  }
  // Decode each lvlToDim result expression.
  assert(dimRank == lvlToDim.getNumResults());
  for (Dimension d = 0; d < dimRank; d++) {
    // ...
    // AffineExprKind::DimId:
      l = cast<AffineDimExpr>(exp).getPosition();
    // AffineExprKind::Add (d = l' * c + l):
      auto add = cast<AffineBinaryOpExpr>(exp);
      // ...
      auto mul = cast<AffineBinaryOpExpr>(add.getLHS());
      ll = cast<AffineDimExpr>(mul.getLHS()).getPosition();
      c = cast<AffineConstantExpr>(mul.getRHS()).getValue();
      l = cast<AffineDimExpr>(add.getRHS()).getPosition();
    // default:
      llvm::report_fatal_error("unsupported lvl2dim in sparse tensor type");
    // ...
  }
  dim2lvlBuffer = allocaBuffer(builder, loc, dim2lvlValues);
  lvl2dimBuffer = allocaBuffer(builder, loc, lvl2dimValues);
Cross-referenced declarations (with their Doxygen briefs) for the fragments above:

#define MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO)
#define MLIR_SPARSETENSOR_FOREVERY_V(DO)
X-macros that apply DO to every fixed-width overhead type and to every value type; they expand the CASE macros in the function-name-suffix fragments above.
static int64_t product(ArrayRef< int64_t > vals)
A wrapper around RankedTensorType, which has three goals:
Type getElementType() const
ArrayRef< Size > getDimShape() const
Returns the dimension-shape.
Dimension getDimRank() const
Returns the dimension-rank.
AffineMap getLvlToDim() const
Returns the lvlToDim mapping (or the null-map for the identity).
bool isIdentity() const
Returns true if the dimToLvl mapping is the identity.
bool hasDynamicDimShape() const
Returns true if any dimension has dynamic size.
Level getLvlRank() const
Returns the level-rank.
bool isDynamicDim(Dimension d) const
Returns true if the given dimension has dynamic size.
AffineMap getDimToLvl() const
Returns the dimToLvl mapping (or the null-map for the identity).
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value val, int64_t dim)
Create one memref::DimOp or tensor::DimOp depending on the type of val.
TypedAttr getOneAttr(Builder &builder, Type tp)
Generates a 1-valued attribute of the given type.
FlatSymbolRefAttr getFunc(ModuleOp module, StringRef name, TypeRange resultType, ValueRange operands, EmitCInterface emitCInterface)
Returns a function reference (first hit also inserts into module).
Value genAllocaScalar(OpBuilder &builder, Location loc, Type tp)
Generates an uninitialized temporary buffer with room for one value of the given type,...
Value constantIndex(OpBuilder &builder, Location loc, int64_t i)
Generates a constant of index type.
void foreachInSparseConstant(OpBuilder &builder, Location loc, SparseElementsAttr attr, AffineMap order, function_ref< void(ArrayRef< Value >, Value)> callback)
Iterates over a sparse constant, generating a constant op for each value and its coordinates.
Value constantZero(OpBuilder &builder, Location loc, Type tp)
Generates a 0-valued constant of the given type.
Value allocaBuffer(OpBuilder &builder, Location loc, ValueRange values)
Generates a temporary buffer, initializes it with the given contents, and returns it as type memref<? x $tp>.
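As an illustration of constantIndex and allocaBuffer, here is a minimal sketch (not part of the file; the wrapper name is hypothetical and the declarations above are assumed to be in scope, e.g. via CodegenUtils.h):

static Value allocaIndexBuffer(OpBuilder &builder, Location loc,
                               ArrayRef<int64_t> indices) {
  // Materialize each index as a constant of index type, then copy the
  // values into a freshly alloca'ed rank-1 buffer.
  SmallVector<Value> values;
  for (int64_t i : indices)
    values.push_back(constantIndex(builder, loc, i));
  return allocaBuffer(builder, loc, values);
}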
OverheadType posTypeEncoding(SparseTensorEncodingAttr enc)
Returns the OverheadType for position overhead storage.
OverheadType
Encoding of overhead types (both position overhead and coordinate overhead), for "overloading" @newSparseTensor.
uint64_t Dimension
The type of dimension identifiers and dimension-ranks.
OverheadType crdTypeEncoding(SparseTensorEncodingAttr enc)
Returns the OverheadType for coordinate overhead storage.
uint64_t Level
The type of level identifiers and level-ranks.
TypedValue< BaseMemRefType > genToMemref(OpBuilder &builder, Location loc, Value tensor)
OverheadType overheadTypeEncoding(unsigned width)
Converts an overhead storage bitwidth to its internal type-encoding.
Value genIndexLoad(OpBuilder &builder, Location loc, Value mem, ValueRange s)
Generates a pointer/index load from the sparse storage scheme.
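A small usage sketch (hypothetical wrapper, assuming the declaration above): reading one entry of a positions buffer whose stored element type may be narrower than index:

static Value loadPosition(OpBuilder &builder, Location loc, Value posBuffer,
                          Value i) {
  // genIndexLoad emits the memref.load and, if the stored element type is
  // not `index`, casts the loaded value to `index` (see the fragment above).
  return genIndexLoad(builder, loc, posBuffer, ValueRange{i});
}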
int64_t Size
The type for individual components of a compile-time shape, including the value ShapedType::kDynamic ...
StringRef overheadTypeFunctionSuffix(OverheadType ot)
Convert OverheadType to its function-name suffix.
PrimaryType
Encoding of the elemental type, for "overloading" @newSparseTensor.
RankedTensorType getRankedTensorType(T &&t)
Convenience method to abbreviate casting getType().
PrimaryType primaryTypeEncoding(Type elemTp)
Converts a primary storage type to its internal type-encoding.
Operation * getTop(Operation *op)
Scans to top of generated loop.
Value createOrFoldSliceStrideOp(OpBuilder &builder, Location loc, Value tensor, Dimension dim)
Generates code to retrieve the slice stride for the sparse tensor slice, returning a constant if the stride is statically known.
Type getOpaquePointerType(MLIRContext *ctx)
Returns the equivalent of void* for opaque arguments to the execution engine.
SparseTensorEncodingAttr getSparseTensorEncoding(Type type)
Convenience method to get a sparse encoding attribute from a type.
Value genMapBuffers(OpBuilder &builder, Location loc, SparseTensorType stt, ArrayRef< Value > dimSizesValues, Value dimSizesBuffer, SmallVectorImpl< Value > &lvlSizesValues, Value &dim2lvlBuffer, Value &lvl2dimBuffer)
Generates code to set up the buffer parameters for a map.
Value genIsNonzero(OpBuilder &builder, Location loc, Value v)
Generates the comparison v != 0 where v is of numeric type.
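For example, the resulting i1 predicate can feed an arith.select; a minimal sketch with a hypothetical helper name:

static Value selectIfNonzero(OpBuilder &builder, Location loc, Value v,
                             Value thenVal, Value elseVal) {
  // genIsNonzero compares v against the zero of its own numeric type.
  Value cond = genIsNonzero(builder, loc, v);
  return builder.create<arith::SelectOp>(loc, cond, thenVal, elseVal);
}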
Value genReader(OpBuilder &builder, Location loc, SparseTensorType stt, Value tensor, SmallVectorImpl< Value > &dimSizesValues, Value &dimSizesBuffer)
Generates code that opens a reader and sets the dimension sizes.
Value genScalarToTensor(OpBuilder &builder, Location loc, Value elem, Type dstTp)
Add conversion from scalar to given type (possibly a 0-rank tensor).
void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer)
Generates code to deallocate a dense buffer.
Value genAlloca(OpBuilder &builder, Location loc, Value sz, Type tp)
Generates an uninitialized temporary buffer of the given size and type, but returns it as type memref...
constexpr uint64_t encodeLvl(uint64_t i, uint64_t c, uint64_t ii)
SmallVector< Value > loadAll(OpBuilder &builder, Location loc, size_t size, Value mem, size_t offsetIdx=0, Value offsetVal=Value())
Loads size-many values from the memref, which must have rank-1 and size greater-or-equal to size.
constexpr uint64_t encodeDim(uint64_t i, uint64_t cf, uint64_t cm)
Bit manipulations for affine encoding.
void genReshapeDstShape(OpBuilder &builder, Location loc, SmallVectorImpl< Value > &dstShape, ArrayRef< Value > srcShape, ArrayRef< Size > staticDstShape, ArrayRef< ReassociationIndices > reassociation)
Computes the shape of destination tensor of a reshape operator.
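For intuition, consider expanding a source dimension of runtime size n through a reassociation group with static destination shape 2 x ? x 4: the single dynamic destination size is the source size divided by the product of the known sizes, i.e. n / 8, which is exactly what the arith.divui in the expansion fragment above computes.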
SparseTensorType getSparseTensorType(Value val)
Convenience methods to obtain a SparseTensorType from a Value.
void reshapeCvs(OpBuilder &builder, Location loc, ArrayRef< ReassociationIndices > reassociation, ValueRange srcSizes, ValueRange srcCvs, ValueRange dstSizes, SmallVectorImpl< Value > &dstCvs)
Reshape coordinates during a reshaping operation.
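A concrete collapse example: with reassociation [[0, 1]] and source sizes 3 x 4, source coordinates (2, 3) are linearized to 2 * 4 + 3 = 11; the expansion direction inverts this with 11 / 4 = 2 and 11 mod 4 = 3, matching the arith.muli/addi and arith.divui/remui pairs in the reshapeCvs fragment above.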
func::CallOp createFuncCall(OpBuilder &builder, Location loc, StringRef name, TypeRange resultType, ValueRange operands, EmitCInterface emitCInterface)
Creates a CallOp to the function reference returned by getFunc() in the builder's module.
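A usage sketch mirroring the genReader fragment above (the wrapper name is hypothetical; the runtime entry point, result type, and EmitCInterface::On are taken from that fragment):

static Value readDimSizes(OpBuilder &builder, Location loc, Value reader) {
  // The runtime call returns the dimension sizes as a rank-1 memref of index.
  Type memTp = MemRefType::get({ShapedType::kDynamic}, builder.getIndexType());
  return createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
                        reader, EmitCInterface::On)
      .getResult(0);
}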
Value genCast(OpBuilder &builder, Location loc, Value value, Type dstTy)
Add type casting between arith and index types when needed.
StringRef primaryTypeFunctionSuffix(PrimaryType pt)
Convert PrimaryType to its function-name suffix.
Value createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc, Value tensor, Dimension dim)
Generates code to retrieve the slice offset for the sparse tensor slice, returning a constant if the offset is statically known.
Value constantPrimaryTypeEncoding(OpBuilder &builder, Location loc, Type elemTp)
Generates a constant of the internal type-encoding for primary storage.
void sizesFromSrc(OpBuilder &builder, SmallVectorImpl< Value > &sizes, Location loc, Value src)
Populates given sizes array from dense tensor or sparse tensor constant.
Type getOverheadType(Builder &builder, OverheadType ot)
Converts the internal type-encoding for overhead storage to an mlir::Type.
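Taken together with overheadTypeEncoding above, a minimal sketch (hypothetical helper) that maps a position/coordinate bitwidth to the element type used for overhead storage; in this encoding a bitwidth of 0 conventionally selects the native index type:

static Type overheadElementType(Builder &builder, unsigned width) {
  // 64/32/16/8 map to the matching integer type; 0 maps to `index`.
  return getOverheadType(builder, overheadTypeEncoding(width));
}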
EmitCInterface
Shorthand aliases for the emitCInterface argument to getFunc(), createFuncCall(), and replaceOpWithFuncCall().
Value allocDenseTensor(OpBuilder &builder, Location loc, RankedTensorType tensorTp, ValueRange sizes)
Generates code to allocate a buffer of the given type, and zero initialize it.
void storeAll(OpBuilder &builder, Location loc, Value mem, ValueRange vs, size_t offsetIdx=0, Value offsetVal=Value())
Stores all the values of vs into the memref mem, which must have rank-1 and size greater-or-equal to vs.size().
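A round-trip sketch combining genAlloca, storeAll, and loadAll (hypothetical wrapper; the offset handling matches the fragments above, which add offsetVal at position offsetIdx):

static SmallVector<Value> storeThenReload(OpBuilder &builder, Location loc,
                                          ValueRange vals, Value offsetVal) {
  // Write vals into a fresh rank-1 index buffer, adding offsetVal to entry 0
  // on the way in, then read all entries back unchanged.
  Value sz = constantIndex(builder, loc, static_cast<int64_t>(vals.size()));
  Value buf = genAlloca(builder, loc, sz, builder.getIndexType());
  storeAll(builder, loc, buf, vals, /*offsetIdx=*/0, offsetVal);
  return loadAll(builder, loc, vals.size(), buf);
}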
Value convertScalarToDtype(OpBuilder &b, Location loc, Value operand, Type toType, bool isUnsignedCast)
Converts a scalar value operand to type toType.