40 llvm_unreachable(
"Unsupported overhead bitwidth");
46 if (
auto intTp = dyn_cast<IntegerType>(tp))
48 llvm_unreachable(
"Unknown overhead type");
64 llvm_unreachable(
"Unknown OverheadType");
89#define CASE(ONAME, O) \
90 case OverheadType::kU##ONAME: \
95 llvm_unreachable(
"Unknown OverheadType");
107 if (
auto complexTp = dyn_cast<ComplexType>(elemTp)) {
108 Type elt = complexTp.getElementType();
131 if (
auto complexTp = dyn_cast<ComplexType>(elemTp)) {
132 auto complexEltTp = complexTp.getElementType();
133 if (complexEltTp.isF64())
135 if (complexEltTp.isF32())
138 llvm_unreachable(
"Unknown primary type");
143#define CASE(VNAME, V) \
144 case PrimaryType::k##VNAME: \
149 llvm_unreachable(
"Unknown PrimaryType");
167 if (isa<IndexType>(srcTp) || isa<IndexType>(dstTp))
168 return arith::IndexCastOp::create(builder, loc, dstTp, value);
170 const auto srcIntTp = dyn_cast_or_null<IntegerType>(srcTp);
171 const bool isUnsignedCast = srcIntTp ? srcIntTp.isUnsigned() :
false;
177 if (
auto rtp = dyn_cast<RankedTensorType>(dstTp)) {
179 assert(rtp.getRank() == 0);
181 return tensor::FromElementsOp::create(builder, loc, rtp, elem);
188 Value load = memref::LoadOp::create(builder, loc, mem, s);
189 if (!isa<IndexType>(
load.getType())) {
190 if (
load.getType().getIntOrFloatBitWidth() < 64)
199 if (isa<FloatType>(tp))
201 if (isa<IndexType>(tp))
203 if (
auto intTp = dyn_cast<IntegerType>(tp))
205 if (isa<RankedTensorType, VectorType>(tp)) {
206 auto shapedTp = cast<ShapedType>(tp);
207 if (
auto one =
getOneAttr(builder, shapedTp.getElementType()))
210 llvm_unreachable(
"Unsupported attribute type");
217 if (isa<FloatType>(tp))
218 return arith::CmpFOp::create(builder, loc, arith::CmpFPredicate::UNE, v,
221 return arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, v,
223 if (isa<ComplexType>(tp))
224 return complex::NotEqualOp::create(builder, loc, v, zero);
225 llvm_unreachable(
"Non-numeric type");
233 if (reassociation.size() < srcShape.size()) {
235 for (
const auto &map : llvm::enumerate(reassociation)) {
237 for (
unsigned i = start; i < start + map.value().size(); i++) {
238 dstDim = arith::MulIOp::create(builder, loc, dstDim, srcShape[i]);
240 dstShape.push_back(dstDim);
241 start = start + map.value().size();
243 assert(start == srcShape.size());
248 assert(reassociation.size() == srcShape.size());
251 for (
unsigned i = 0, size = srcShape.size(); i < size; i++) {
252 const auto &map = reassociation[i];
253 auto srcDim = srcShape[i];
255 for (
unsigned j = start;
j < start + map.size();
j++) {
260 if (staticDstShape[
j] == ShapedType::kDynamic) {
264 for (
unsigned k = start; k < start + map.size(); k++) {
265 if (staticDstShape[k] != ShapedType::kDynamic) {
272 arith::DivUIOp::create(builder, loc, srcDim, productVal);
273 dstShape.push_back(dynamicSize);
276 dstShape.push_back(
constantIndex(builder, loc, staticDstShape[
j]));
279 start = start + map.size();
281 assert(start == staticDstShape.size());
289 const unsigned srcRank = srcSizes.size();
290 const unsigned dstRank = dstSizes.size();
291 assert(srcRank == srcCvs.size() &&
"Source rank mismatch");
292 const bool isCollapse = srcRank > dstRank;
293 const ValueRange sizes = isCollapse ? srcSizes : dstSizes;
297 for (
const auto &map : llvm::enumerate(reassociation)) {
300 for (
unsigned j = start, end = start + map.value().size();
j < end;
j++) {
301 linear = arith::MulIOp::create(builder, loc, linear, sizes[
j]);
308 for (
unsigned j = start, end = start + map.value().size();
j < end;
j++) {
309 linear = arith::DivUIOp::create(builder, loc, linear, sizes[
j]);
312 arith::MulIOp::create(builder, loc, srcCvs[
j], linear);
313 val = val ? arith::AddIOp::create(builder, loc, val,
mul) :
mul;
315 const Value old = val;
316 val = arith::DivUIOp::create(builder, loc, val, linear);
317 assert(dstCvs.size() ==
j);
318 dstCvs.push_back(val);
319 val = arith::RemUIOp::create(builder, loc, old, linear);
324 assert(dstCvs.size() == i);
325 dstCvs.push_back(val);
327 start += map.value().size();
330 assert(dstCvs.size() == dstRank);
338 auto result = SymbolRefAttr::get(context, name);
339 auto func =
module.lookupSymbol<func::FuncOp>(result.getAttr());
341 OpBuilder moduleBuilder(module.getBodyRegion());
342 func = func::FuncOp::create(
343 moduleBuilder, module.getLoc(), name,
344 FunctionType::get(context, operands.
getTypes(), resultType));
346 if (
static_cast<bool>(emitCInterface))
347 func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
348 UnitAttr::get(context));
356 auto module = builder.getBlock()->getParentOp()->getParentOfType<ModuleOp>();
358 getFunc(module, name, resultType, operands, emitCInterface);
359 return func::CallOp::create(builder, loc, resultType, fn, operands);
363 return LLVM::LLVMPointerType::get(ctx);
371 unsigned sz,
Type tp,
bool staticShape) {
373 auto memTp = MemRefType::get({sz}, tp);
374 return memref::AllocaOp::create(builder, loc, memTp);
381 auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
382 return memref::AllocaOp::create(builder, loc, memTp,
ValueRange{sz});
387 return memref::AllocaOp::create(builder, loc, MemRefType::get({}, tp));
392 const unsigned sz = values.size();
395 for (
unsigned i = 0; i < sz; i++) {
397 memref::StoreOp::create(builder, loc, values[i], buffer, idx);
403 RankedTensorType tensorTp,
405 Type elemTp = tensorTp.getElementType();
406 auto shape = tensorTp.getShape();
407 auto memTp = MemRefType::get(
shape, elemTp);
409 for (
unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
410 if (
shape[i] == ShapedType::kDynamic)
411 dynamicSizes.push_back(sizes[i]);
413 Value mem = memref::AllocOp::create(builder, loc, memTp, dynamicSizes);
421 memref::DeallocOp::create(builder, loc, buffer);
433 for (; isa<scf::ForOp, scf::WhileOp, scf::ParallelOp, scf::IfOp>(
447 const Dimension dimRank = stt.getDimRank();
448 const auto coordinates = attr.getIndices().getValues<IntegerAttr>();
449 const auto values = attr.getValues().getValues<
Attribute>();
456 using ElementAttr = std::pair<SmallVector<IntegerAttr>,
Attribute>;
460 for (
size_t i = 0, nse = values.size(); i < nse; i++) {
461 elems.emplace_back();
462 elems.back().second = values[i];
463 auto &coords = elems.back().first;
464 coords.reserve(dimRank);
466 coords.push_back(coordinates[i * dimRank + d]);
470 llvm::sort(elems, [order](
const ElementAttr &
lhs,
const ElementAttr &
rhs) {
471 if (std::addressof(
lhs) == std::addressof(
rhs))
474 auto lhsCoords = llvm::map_to_vector(
475 lhs.first, [](IntegerAttr i) { return i.getInt(); });
476 auto rhsCoords = llvm::map_to_vector(
477 rhs.first, [](IntegerAttr i) { return i.getInt(); });
483 if (lhsLvlCrds[l] == rhsLvlCrds[l])
485 return lhsLvlCrds[l] < rhsLvlCrds[l];
487 llvm_unreachable(
"no equal coordinate in sparse element attr");
491 cvs.reserve(dimRank);
492 for (
size_t i = 0, nse = values.size(); i < nse; i++) {
495 for (
Dimension d = 0; d < dimRank; d++) {
496 auto crd = elems[i].first[d].getInt();
501 if (isa<ComplexType>(attr.getElementType())) {
502 auto valAttr = cast<ArrayAttr>(elems[i].second);
503 val = complex::ConstantOp::create(builder, loc, attr.getElementType(),
506 auto valAttr = cast<TypedAttr>(elems[i].second);
507 val = arith::ConstantOp::create(builder, loc, valAttr);
515 size_t size,
Value mem,
516 size_t offsetIdx,
Value offsetVal) {
518 const auto memTp = cast<MemRefType>(mem.
getType());
519 assert(memTp.getRank() == 1);
520 const Size memSh = memTp.getDimSize(0);
521 assert(ShapedType::isDynamic(memSh) || memSh >=
static_cast<Size>(size));
522 assert(offsetIdx == 0 || offsetIdx < size);
526 for (
unsigned i = 0; i < size; i++) {
527 Value v = memref::LoadOp::create(builder, loc, mem,
529 if (i == offsetIdx && offsetVal)
530 v = arith::AddIOp::create(builder, loc, v, offsetVal);
539 const size_t vsize = vs.size();
540 const auto memTp = cast<MemRefType>(mem.
getType());
541 assert(memTp.getRank() == 1);
542 const Size memSh = memTp.getDimSize(0);
543 assert(ShapedType::isDynamic(memSh) || memSh >=
static_cast<Size>(vsize));
544 assert(offsetIdx == 0 || offsetIdx < vsize);
546 for (
const auto &v : llvm::enumerate(vs)) {
548 (offsetIdx == v.index() && offsetVal)
549 ? arith::AddIOp::create(builder, loc, v.value(), offsetVal)
551 memref::StoreOp::create(builder, loc, w, mem,
558 auto tTp = llvm::cast<TensorType>(
tensor.getType());
559 auto mTp = MemRefType::get(tTp.getShape(), tTp.getElementType());
560 return cast<TypedValue<BaseMemRefType>>(
561 bufferization::ToBufferOp::create(builder, loc, mTp,
tensor).getResult());
567 assert(enc && enc.isSlice());
568 std::optional<unsigned> offset = enc.getStaticDimSliceOffset(dim);
569 if (offset.has_value())
571 return ToSliceOffsetOp::create(builder, loc,
tensor, APInt(64, dim));
577 assert(enc && enc.isSlice());
578 std::optional<unsigned> stride = enc.getStaticDimSliceStride(dim);
579 if (stride.has_value())
581 return ToSliceStrideOp::create(builder, loc,
tensor, APInt(64, dim));
587 Value &dimSizesBuffer) {
591 dimSizesValues.clear();
592 dimSizesValues.reserve(dimRank);
594 const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
605 createFuncCall(builder, loc,
"createCheckedSparseTensorReader", opaqueTp,
611 dimSizesBuffer = dimShapesBuffer;
614 auto memTp = MemRefType::get({ShapedType::kDynamic}, indexTp);
616 createFuncCall(builder, loc,
"getSparseTensorReaderDimSizes", memTp,
621 for (
Dimension d = 0; d < dimRank; d++) {
623 dimSizesValues[d] = memref::LoadOp::create(
624 builder, loc, dimSizesBuffer,
constantIndex(builder, loc, d));
634 Value &dim2lvlBuffer,
635 Value &lvl2dimBuffer) {
638 lvlSizesValues.clear();
639 lvlSizesValues.reserve(lvlRank);
644 assert(dimRank == lvlRank);
646 iotaValues.reserve(lvlRank);
647 for (
Level l = 0; l < lvlRank; l++) {
649 lvlSizesValues.push_back(dimSizesValues[l]);
651 dim2lvlBuffer = lvl2dimBuffer =
allocaBuffer(builder, loc, iotaValues);
652 return dimSizesBuffer;
662 assert(lvlRank == dimToLvl.getNumResults());
663 for (
Level l = 0; l < lvlRank; l++) {
670 uint64_t
cf = 0, cm = 0;
673 d = cast<AffineDimExpr>(exp).getPosition();
677 auto floor = cast<AffineBinaryOpExpr>(exp);
678 d = cast<AffineDimExpr>(floor.getLHS()).getPosition();
679 cf = cast<AffineConstantExpr>(floor.getRHS()).getValue();
683 auto mod = cast<AffineBinaryOpExpr>(exp);
684 d = cast<AffineDimExpr>(mod.getLHS()).getPosition();
685 cm = cast<AffineConstantExpr>(mod.getRHS()).getValue();
689 llvm::report_fatal_error(
"unsupported dim2lvl in sparse tensor type");
698 lvlSz = dimSizesValues[d];
700 lvlSz = arith::DivUIOp::create(builder, loc, lvlSz,
705 lvlSizesValues.push_back(lvlSz);
708 assert(dimRank == lvlToDim.getNumResults());
709 for (
Dimension d = 0; d < dimRank; d++) {
718 l = cast<AffineDimExpr>(exp).getPosition();
723 auto add = cast<AffineBinaryOpExpr>(exp);
725 auto mul = cast<AffineBinaryOpExpr>(
add.getLHS());
726 ll = cast<AffineDimExpr>(
mul.getLHS()).getPosition();
727 c = cast<AffineConstantExpr>(
mul.getRHS()).getValue();
728 l = cast<AffineDimExpr>(
add.getRHS()).getPosition();
732 llvm::report_fatal_error(
"unsupported lvl2dim in sparse tensor type");
737 dim2lvlBuffer =
allocaBuffer(builder, loc, dim2lvlValues);
738 lvl2dimBuffer =
allocaBuffer(builder, loc, lvl2dimValues);
#define MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO)
#define MLIR_SPARSETENSOR_FOREVERY_V(DO)
static int64_t product(ArrayRef< int64_t > vals)
Base type for affine expressions.
AffineExprKind getKind() const
Return the classification for this expression.
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
unsigned getNumResults() const
AffineMap compose(AffineMap map) const
Returns the AffineMap resulting from composing this with map.
Attributes are known-constant values of operations.
This class is a general helper class for creating context-global objects like types,...
IntegerAttr getIndexAttr(int64_t value)
IntegerAttr getIntegerAttr(Type type, int64_t value)
AffineMap getMultiDimIdentityMap(unsigned rank)
FloatAttr getFloatAttr(Type type, double value)
IntegerType getIntegerType(unsigned width)
MLIRContext * getContext() const
static DenseElementsAttr get(ShapedType type, ArrayRef< Attribute > values)
Constructs a dense elements attribute from an array of element values.
A symbol reference with a reference path containing a single element.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
MLIRContext is the top-level object for a collection of MLIR operations.
This class helps build Operations.
Operation is the basic unit of execution within MLIR.
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
This class provides an abstraction over the various different ranges of value types.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
bool isIntOrIndex() const
Return true if this is an integer (of any signedness) or an index type.
bool isInteger() const
Return true if this is an integer type (with the specified width).
This class provides an abstraction over the different types of ranges over Values.
type_range getTypes() const
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
A wrapper around RankedTensorType, which has three goals:
Type getElementType() const
Dimension getDimRank() const
Returns the dimension-rank.
AffineMap getLvlToDim() const
Returns the lvlToDim mapping (or the null-map for the identity).
bool isIdentity() const
Returns true if the dimToLvl mapping is the identity.
bool hasDynamicDimShape() const
Returns true if any dimension has dynamic size.
ArrayRef< Size > getDimShape() const
Returns the dimension-shape.
Level getLvlRank() const
Returns the level-rank.
bool isDynamicDim(Dimension d) const
Returns true if the given dimension has dynamic size.
AffineMap getDimToLvl() const
Returns the dimToLvl mapping (or the null-map for the identity).
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value val, int64_t dim)
Create one memref::DimOp or tensor::DimOp depending on the type of val.
TypedAttr getOneAttr(Builder &builder, Type tp)
Generates a 1-valued attribute of the given type.
FlatSymbolRefAttr getFunc(ModuleOp module, StringRef name, TypeRange resultType, ValueRange operands, EmitCInterface emitCInterface)
Returns a function reference (first hit also inserts into module).
Value genAllocaScalar(OpBuilder &builder, Location loc, Type tp)
Generates an uninitialized temporary buffer with room for one value of the given type,...
Value constantIndex(OpBuilder &builder, Location loc, int64_t i)
Generates a constant of index type.
void foreachInSparseConstant(OpBuilder &builder, Location loc, SparseElementsAttr attr, AffineMap order, function_ref< void(ArrayRef< Value >, Value)> callback)
Iterate over a sparse constant, generates constantOp for value and coordinates.
Value constantZero(OpBuilder &builder, Location loc, Type tp)
Generates a 0-valued constant of the given type.
uint64_t Dimension
The type of dimension identifiers and dimension-ranks.
Value allocaBuffer(OpBuilder &builder, Location loc, ValueRange values)
Generates a temporary buffer, initializes it with the given contents, and returns it as type memref<?
OverheadType posTypeEncoding(SparseTensorEncodingAttr enc)
Returns the OverheadType for position overhead storage.
OverheadType
Encoding of overhead types (both position overhead and coordinate overhead), for "overloading" @newSp...
OverheadType crdTypeEncoding(SparseTensorEncodingAttr enc)
Returns the OverheadType for coordinate overhead storage.
TypedValue< BaseMemRefType > genToMemref(OpBuilder &builder, Location loc, Value tensor)
OverheadType overheadTypeEncoding(unsigned width)
Converts an overhead storage bitwidth to its internal type-encoding.
Value genIndexLoad(OpBuilder &builder, Location loc, Value mem, ValueRange s)
Generates a pointer/index load from the sparse storage scheme.
StringRef overheadTypeFunctionSuffix(OverheadType ot)
Convert OverheadType to its function-name suffix.
PrimaryType
Encoding of the elemental type, for "overloading" @newSparseTensor.
RankedTensorType getRankedTensorType(T &&t)
Convenience method to abbreviate casting getType().
bool isValidPrimaryType(Type elemTp)
Returns true if the given type is a valid sparse tensor element type supported by the runtime library...
uint64_t Level
The type of level identifiers and level-ranks.
PrimaryType primaryTypeEncoding(Type elemTp)
Converts a primary storage type to its internal type-encoding.
Operation * getTop(Operation *op)
Scans to top of generated loop.
Value createOrFoldSliceStrideOp(OpBuilder &builder, Location loc, Value tensor, Dimension dim)
Generates code to retrieve the slice stride for the sparse tensor slice, return a constant if the stride is statically known.
Type getOpaquePointerType(MLIRContext *ctx)
Returns the equivalent of void* for opaque arguments to the execution engine.
SparseTensorEncodingAttr getSparseTensorEncoding(Type type)
Convenience method to get a sparse encoding attribute from a type.
Value genMapBuffers(OpBuilder &builder, Location loc, SparseTensorType stt, ArrayRef< Value > dimSizesValues, Value dimSizesBuffer, SmallVectorImpl< Value > &lvlSizesValues, Value &dim2lvlBuffer, Value &lvl2dimBuffer)
Generates code to set up the buffer parameters for a map.
Value genIsNonzero(OpBuilder &builder, Location loc, Value v)
Generates the comparison v != 0 where v is of numeric type.
Value genReader(OpBuilder &builder, Location loc, SparseTensorType stt, Value tensor, SmallVectorImpl< Value > &dimSizesValues, Value &dimSizesBuffer)
Generates code that opens a reader and sets the dimension sizes.
Value genScalarToTensor(OpBuilder &builder, Location loc, Value elem, Type dstTp)
Add conversion from scalar to given type (possibly a 0-rank tensor).
void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer)
Generates code to deallocate a dense buffer.
Value genAlloca(OpBuilder &builder, Location loc, Value sz, Type tp)
Generates an uninitialized temporary buffer of the given size and type, but returns it as type memref...
constexpr uint64_t encodeLvl(uint64_t i, uint64_t c, uint64_t ii)
SmallVector< Value > loadAll(OpBuilder &builder, Location loc, size_t size, Value mem, size_t offsetIdx=0, Value offsetVal=Value())
Loads size-many values from the memref, which must have rank-1 and size greater-or-equal to size.
int64_t Size
The type for individual components of a compile-time shape, including the value ShapedType::kDynamic ...
constexpr uint64_t encodeDim(uint64_t i, uint64_t cf, uint64_t cm)
Bit manipulations for affine encoding.
void genReshapeDstShape(OpBuilder &builder, Location loc, SmallVectorImpl< Value > &dstShape, ArrayRef< Value > srcShape, ArrayRef< Size > staticDstShape, ArrayRef< ReassociationIndices > reassociation)
Computes the shape of destination tensor of a reshape operator.
SparseTensorType getSparseTensorType(Value val)
Convenience methods to obtain a SparseTensorType from a Value.
void reshapeCvs(OpBuilder &builder, Location loc, ArrayRef< ReassociationIndices > reassociation, ValueRange srcSizes, ValueRange srcCvs, ValueRange dstSizes, SmallVectorImpl< Value > &dstCvs)
Reshape coordinates during a reshaping operation.
func::CallOp createFuncCall(OpBuilder &builder, Location loc, StringRef name, TypeRange resultType, ValueRange operands, EmitCInterface emitCInterface)
Creates a CallOp to the function reference returned by getFunc() in the builder's module.
Value genCast(OpBuilder &builder, Location loc, Value value, Type dstTy)
Add type casting between arith and index types when needed.
StringRef primaryTypeFunctionSuffix(PrimaryType pt)
Convert PrimaryType to its function-name suffix.
Value createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc, Value tensor, Dimension dim)
Generates code to retrieve the slice offset for the sparse tensor slice, return a constant if the off...
Value constantPrimaryTypeEncoding(OpBuilder &builder, Location loc, Type elemTp)
Generates a constant of the internal type-encoding for primary storage.
void sizesFromSrc(OpBuilder &builder, SmallVectorImpl< Value > &sizes, Location loc, Value src)
Populates given sizes array from dense tensor or sparse tensor constant.
Type getOverheadType(Builder &builder, OverheadType ot)
Converts the internal type-encoding for overhead storage to an mlir::Type.
EmitCInterface
Shorthand aliases for the emitCInterface argument to getFunc(), createFuncCall(), and replaceOpWithFu...
Value allocDenseTensor(OpBuilder &builder, Location loc, RankedTensorType tensorTp, ValueRange sizes)
Generates code to allocate a buffer of the given type, and zero initialize it.
void storeAll(OpBuilder &builder, Location loc, Value mem, ValueRange vs, size_t offsetIdx=0, Value offsetVal=Value())
Stores all the values of vs into the memref mem, which must have rank-1 and size greater-or-equal to ...
Include the generated interface declarations.
Value convertScalarToDtype(OpBuilder &b, Location loc, Value operand, Type toType, bool isUnsignedCast)
Converts a scalar value operand to type toType.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
@ Mul
RHS of mul is always a constant or a symbolic expression.
@ Mod
RHS of mod is always a constant or a symbolic expression with a positive value.
@ DimId
Dimensional identifier.
@ FloorDiv
RHS of floordiv is always a constant or a symbolic expression.
std::conditional_t< std::is_same_v< Ty, mlir::Type >, mlir::Value, detail::TypedValue< Ty > > TypedValue
If Ty is mlir::Type this will select Value instead of having a wrapper around it.
llvm::function_ref< Fn > function_ref
Eliminates variable at the specified position using Fourier-Motzkin variable elimination.