51 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
71 #define ASSERT_NO_STRIDE(MEMREF) \
73 assert((MEMREF) && "Memref is nullptr"); \
74 assert(((MEMREF)->strides[0] == 1) && "Memref has non-trivial stride"); \
// Reads the memref's (rank-1) size and casts it to uint64_t, asserting that
// the cast does not overflow (via detail::checkOverflowCast).
77 #define MEMREF_GET_USIZE(MEMREF) \
78 detail::checkOverflowCast<uint64_t>((MEMREF)->sizes[0])
// Asserts that the memref's (rank-1) size equals `SZ`, using detail::safelyEQ
// to avoid signed/unsigned comparison pitfalls.
80 #define ASSERT_USIZE_EQ(MEMREF, SZ) \
81 assert(detail::safelyEQ(MEMREF_GET_USIZE(MEMREF), (SZ)) && \
82 "Memref size mismatch")
// Computes the memref's first payload address: base data pointer plus offset.
84 #define MEMREF_GET_PAYLOAD(MEMREF) ((MEMREF)->data + (MEMREF)->offset)
93 template <
typename DataSizeT,
typename T>
94 static inline void aliasIntoMemref(DataSizeT size, T *data,
98 using MemrefSizeT = std::remove_reference_t<decltype(ref.
sizes[0])>;
99 ref.
sizes[0] = detail::checkOverflowCast<MemrefSizeT>(size);
114 #define CASE(p, c, v, P, C, V) \
115 if (posTp == (p) && crdTp == (c) && valTp == (v)) { \
117 case Action::kEmpty: { \
118 return SparseTensorStorage<P, C, V>::newEmpty( \
119 dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim); \
121 case Action::kFromReader: { \
122 assert(ptr && "Received nullptr for SparseTensorReader object"); \
123 SparseTensorReader &reader = *static_cast<SparseTensorReader *>(ptr); \
124 return static_cast<void *>(reader.readSparseTensor<P, C, V>( \
125 lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim)); \
127 case Action::kPack: { \
128 assert(ptr && "Received nullptr for SparseTensorStorage object"); \
129 intptr_t *buffers = static_cast<intptr_t *>(ptr); \
130 return SparseTensorStorage<P, C, V>::newFromBuffers( \
131 dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
134 case Action::kSortCOOInPlace: { \
135 assert(ptr && "Received nullptr for SparseTensorStorage object"); \
136 auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr); \
137 tensor.sortInPlace(); \
141 fprintf(stderr, "unknown action %d\n", static_cast<uint32_t>(action)); \
// Shorthand for CASE when positions and coordinates use the same overhead
// type (the `p`/`P` arguments are passed for both slots).
145 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
// Compile-time guard: the public API's index_type must be exactly uint64_t,
// since the code below uses the two interchangeably.
150 static_assert(std::is_same<index_type, uint64_t>::value,
151 "Expected index_type == uint64_t");
161 ASSERT_NO_STRIDE(dimSizesRef);
162 ASSERT_NO_STRIDE(lvlSizesRef);
163 ASSERT_NO_STRIDE(lvlTypesRef);
164 ASSERT_NO_STRIDE(dim2lvlRef);
165 ASSERT_NO_STRIDE(lvl2dimRef);
166 const uint64_t dimRank = MEMREF_GET_USIZE(dimSizesRef);
167 const uint64_t lvlRank = MEMREF_GET_USIZE(lvlSizesRef);
168 ASSERT_USIZE_EQ(lvlTypesRef, lvlRank);
169 ASSERT_USIZE_EQ(dim2lvlRef, lvlRank);
170 ASSERT_USIZE_EQ(lvl2dimRef, dimRank);
171 const index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
172 const index_type *lvlSizes = MEMREF_GET_PAYLOAD(lvlSizesRef);
173 const LevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
174 const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
175 const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
285 fprintf(stderr,
"unsupported combination of types: <P=%d, C=%d, V=%d>\n",
286 static_cast<int>(posTp),
static_cast<int>(crdTp),
287 static_cast<int>(valTp));
293 #define IMPL_SPARSEVALUES(VNAME, V) \
294 void _mlir_ciface_sparseValues##VNAME(StridedMemRefType<V, 1> *ref, \
296 assert(ref &&tensor); \
298 static_cast<SparseTensorStorageBase *>(tensor)->getValues(&v); \
300 aliasIntoMemref(v->size(), v->data(), *ref); \
303 #undef IMPL_SPARSEVALUES
305 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \
306 void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \
308 assert(ref &&tensor); \
309 std::vector<TYPE> *v; \
310 static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, lvl); \
312 aliasIntoMemref(v->size(), v->data(), *ref); \
315 #define IMPL_SPARSEPOSITIONS(PNAME, P) \
316 IMPL_GETOVERHEAD(sparsePositions##PNAME, P, getPositions)
318 #undef IMPL_SPARSEPOSITIONS
320 #define IMPL_SPARSECOORDINATES(CNAME, C) \
321 IMPL_GETOVERHEAD(sparseCoordinates##CNAME, C, getCoordinates)
323 #undef IMPL_SPARSECOORDINATES
325 #define IMPL_SPARSECOORDINATESBUFFER(CNAME, C) \
326 IMPL_GETOVERHEAD(sparseCoordinatesBuffer##CNAME, C, getCoordinatesBuffer)
328 #undef IMPL_SPARSECOORDINATESBUFFER
330 #undef IMPL_GETOVERHEAD
332 #define IMPL_LEXINSERT(VNAME, V) \
333 void _mlir_ciface_lexInsert##VNAME( \
334 void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef, \
335 StridedMemRefType<V, 0> *vref) { \
337 auto &tensor = *static_cast<SparseTensorStorageBase *>(t); \
338 ASSERT_NO_STRIDE(lvlCoordsRef); \
339 index_type *lvlCoords = MEMREF_GET_PAYLOAD(lvlCoordsRef); \
341 V *value = MEMREF_GET_PAYLOAD(vref); \
342 tensor.lexInsert(lvlCoords, *value); \
345 #undef IMPL_LEXINSERT
347 #define IMPL_EXPINSERT(VNAME, V) \
348 void _mlir_ciface_expInsert##VNAME( \
349 void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef, \
350 StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \
351 StridedMemRefType<index_type, 1> *aref, index_type count) { \
353 auto &tensor = *static_cast<SparseTensorStorageBase *>(t); \
354 ASSERT_NO_STRIDE(lvlCoordsRef); \
355 ASSERT_NO_STRIDE(vref); \
356 ASSERT_NO_STRIDE(fref); \
357 ASSERT_NO_STRIDE(aref); \
358 ASSERT_USIZE_EQ(vref, MEMREF_GET_USIZE(fref)); \
359 index_type *lvlCoords = MEMREF_GET_PAYLOAD(lvlCoordsRef); \
360 V *values = MEMREF_GET_PAYLOAD(vref); \
361 bool *filled = MEMREF_GET_PAYLOAD(fref); \
362 index_type *added = MEMREF_GET_PAYLOAD(aref); \
363 uint64_t expsz = vref->sizes[0]; \
364 tensor.expInsert(lvlCoords, values, filled, added, count, expsz); \
367 #undef IMPL_EXPINSERT
372 ASSERT_NO_STRIDE(dimShapeRef);
373 const uint64_t dimRank = MEMREF_GET_USIZE(dimShapeRef);
374 const index_type *dimShape = MEMREF_GET_PAYLOAD(dimShapeRef);
376 return static_cast<void *
>(reader);
383 auto *dimSizes =
const_cast<uint64_t *
>(reader.
getDimSizes());
384 aliasIntoMemref(reader.
getRank(), dimSizes, *out);
387 #define IMPL_GETNEXT(VNAME, V, CNAME, C) \
388 bool _mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME( \
389 void *p, StridedMemRefType<index_type, 1> *dim2lvlRef, \
390 StridedMemRefType<index_type, 1> *lvl2dimRef, \
391 StridedMemRefType<C, 1> *cref, StridedMemRefType<V, 1> *vref) { \
393 auto &reader = *static_cast<SparseTensorReader *>(p); \
394 ASSERT_NO_STRIDE(dim2lvlRef); \
395 ASSERT_NO_STRIDE(lvl2dimRef); \
396 ASSERT_NO_STRIDE(cref); \
397 ASSERT_NO_STRIDE(vref); \
398 const uint64_t dimRank = reader.getRank(); \
399 const uint64_t lvlRank = MEMREF_GET_USIZE(dim2lvlRef); \
400 const uint64_t cSize = MEMREF_GET_USIZE(cref); \
401 const uint64_t vSize = MEMREF_GET_USIZE(vref); \
402 ASSERT_USIZE_EQ(lvl2dimRef, dimRank); \
403 assert(cSize >= lvlRank * reader.getNSE()); \
404 assert(vSize >= reader.getNSE()); \
408 index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef); \
409 index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef); \
410 C *lvlCoordinates = MEMREF_GET_PAYLOAD(cref); \
411 V *values = MEMREF_GET_PAYLOAD(vref); \
412 return reader.readToBuffers<C, V>(lvlRank, dim2lvl, lvl2dim, \
413 lvlCoordinates, values); \
422 ASSERT_NO_STRIDE(dimSizesRef);
423 assert(dimRank != 0);
424 index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
425 std::ostream &file = *
static_cast<std::ostream *
>(p);
426 file << dimRank <<
" " << nse <<
'\n';
428 file << dimSizes[d] <<
" ";
429 file << dimSizes[dimRank - 1] <<
'\n';
432 #define IMPL_OUTNEXT(VNAME, V) \
433 void _mlir_ciface_outSparseTensorWriterNext##VNAME( \
434 void *p, index_type dimRank, \
435 StridedMemRefType<index_type, 1> *dimCoordsRef, \
436 StridedMemRefType<V, 0> *vref) { \
438 ASSERT_NO_STRIDE(dimCoordsRef); \
439 const index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \
440 std::ostream &file = *static_cast<std::ostream *>(p); \
441 for (index_type d = 0; d < dimRank; d++) \
442 file << (dimCoords[d] + 1) << " "; \
443 V *value = MEMREF_GET_PAYLOAD(vref); \
444 file << *value << '\n'; \
473 constexpr
size_t bufSize = 80;
475 snprintf(var, bufSize,
"TENSOR%" PRIu64,
id);
476 char *env = getenv(var);
478 fprintf(stderr,
"Environment variable %s is not set\n", var);
494 (filename[0] == 0) ? &std::cout :
new std::ofstream(filename);
495 *file <<
"# extended FROSTT format\n";
496 return static_cast<void *
>(file);
500 std::ostream *file =
static_cast<std::ostream *
>(p);
502 assert(file->good());
503 if (file != &std::cout)
509 #undef MEMREF_GET_PAYLOAD
510 #undef ASSERT_USIZE_EQ
511 #undef MEMREF_GET_USIZE
512 #undef ASSERT_NO_STRIDE
#define MLIR_SPARSETENSOR_FOREVERY_O(DO)
#define MLIR_SPARSETENSOR_FOREVERY_V_O(DO)
#define MLIR_SPARSETENSOR_FOREVERY_V(DO)
MLIR_CRUNNERUTILS_EXPORT index_type sparseDimSize(void *tensor, index_type d)
Tensor-storage method to get the size of the given dimension.
MLIR_CRUNNERUTILS_EXPORT index_type sparseLvlSize(void *tensor, index_type l)
Tensor-storage method to get the size of the given level.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor)
Releases the memory for the tensor-storage object.
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNSE(void *p)
Returns the number of stored elements for the sparse tensor being read.
MLIR_CRUNNERUTILS_EXPORT void * _mlir_ciface_createCheckedSparseTensorReader(char *filename, StridedMemRefType< index_type, 1 > *dimShapeRef, PrimaryType valTp)
Constructs a new SparseTensorReader object, opens the file, reads the header, and validates that the actual contents of the file match the expected dimension shape and value type.
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterMetaData(void *p, index_type dimRank, index_type nse, StridedMemRefType< index_type, 1 > *dimSizesRef)
Outputs the sparse tensor dim-rank, nse, and dim-shape.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p)
Releases the SparseTensorReader and closes the associated file.
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderDimSizes(StridedMemRefType< index_type, 1 > *out, void *p)
SparseTensorReader method to obtain direct access to the dimension-sizes array.
MLIR_CRUNNERUTILS_EXPORT void * _mlir_ciface_newSparseTensor(StridedMemRefType< index_type, 1 > *dimSizesRef, StridedMemRefType< index_type, 1 > *lvlSizesRef, StridedMemRefType< LevelType, 1 > *lvlTypesRef, StridedMemRefType< index_type, 1 > *dim2lvlRef, StridedMemRefType< index_type, 1 > *lvl2dimRef, OverheadType posTp, OverheadType crdTp, PrimaryType valTp, Action action, void *ptr)
This is the "swiss army knife" method for materializing sparse tensors into the computation.
MLIR_CRUNNERUTILS_EXPORT char * getTensorFilename(index_type id)
Helper function to read a sparse tensor filename from the environment, defined with the naming convention TENSOR<id> (e.g. TENSOR0, TENSOR1, ...).
MLIR_CRUNNERUTILS_EXPORT void endLexInsert(void *tensor)
Tensor-storage method to finalize lexicographic insertions.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorWriter(void *p)
Finalizes the outputting of a sparse tensor to a file and releases the SparseTensorWriter.
MLIR_CRUNNERUTILS_EXPORT void * createSparseTensorWriter(char *filename)
Creates a SparseTensorWriter for outputting a sparse tensor to a file with the given file name.
#define IMPL_EXPINSERT(VNAME, V)
#define IMPL_LEXINSERT(VNAME, V)
This class abstracts over the information stored in file headers, as well as providing the buffers an...
const uint64_t * getDimSizes() const
Gets the dimension-sizes array.
static SparseTensorReader * create(const char *filename, uint64_t dimRank, const uint64_t *dimShape, PrimaryType valTp)
Factory method to allocate a new reader, open the file, read the header, and validate that the actual contents of the file match the expected dimension shape and value type.
uint64_t getRank() const
Gets the dimension-rank of the tensor.
Abstract base class for SparseTensorStorage<P,C,V>.
std::complex< double > complex64
OverheadType
Encoding of overhead types (both position overhead and coordinate overhead), for "overloading" @newSparseTensor.
Action
The actions performed by @newSparseTensor.
PrimaryType
Encoding of the elemental type, for "overloading" @newSparseTensor.
std::complex< float > complex32
uint64_t index_type
This type is used in the public API at all places where MLIR expects values with the built-in type "index".
StridedMemRef descriptor type specialized for rank 1.
StridedMemRef descriptor type with static rank.
This enum defines all the sparse representations supportable by the SparseTensor dialect.