@@ -717,10 +717,15 @@ static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,

 /// Writes the sparse tensor to extended FROSTT format.
 template <typename V>
-void outSparseTensor(const SparseTensorCOO<V> &tensor, char *filename) {
-  auto &sizes = tensor.getSizes();
-  auto &elements = tensor.getElements();
-  uint64_t rank = tensor.getRank();
+void outSparseTensor(void *tensor, void *dest, bool sort) {
+  assert(tensor && dest);
+  auto coo = static_cast<SparseTensorCOO<V> *>(tensor);
+  if (sort)
+    coo->sort();
+  char *filename = static_cast<char *>(dest);
+  auto &sizes = coo->getSizes();
+  auto &elements = coo->getElements();
+  uint64_t rank = coo->getRank();
   uint64_t nnz = elements.size();
   std::fstream file;
   file.open(filename, std::ios_base::out | std::ios_base::trunc);
@@ -738,6 +743,67 @@ void outSparseTensor(const SparseTensorCOO<V> &tensor, char *filename) {
   file.flush();
   file.close();
   assert(file.good());
+  delete coo;
+}
+
+/// Initializes sparse tensor from an external COO-flavored format.
+template <typename V>
+SparseTensorStorage<uint64_t, uint64_t, V> *
+toMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, V *values,
+                   uint64_t *indices) {
+  // Setup all-dims compressed and default ordering.
+  std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed);
+  std::vector<uint64_t> perm(rank);
+  std::iota(perm.begin(), perm.end(), 0);
+  // Convert external format to internal COO.
+  auto *tensor =
+      SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm.data(), nse);
+  std::vector<uint64_t> idx(rank);
+  for (uint64_t i = 0, base = 0; i < nse; i++) {
+    for (uint64_t r = 0; r < rank; r++)
+      idx[r] = indices[base + r];
+    tensor->add(idx, values[i]);
+    base += rank;
+  }
+  // Return sparse tensor storage format as opaque pointer.
+  return SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
+      rank, shape, perm.data(), sparse.data(), tensor);
+}
+
+/// Converts a sparse tensor to an external COO-flavored format.
+template <typename V>
+void fromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
+                          uint64_t **pShape, V **pValues, uint64_t **pIndices) {
+  auto sparseTensor =
+      static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor);
+  uint64_t rank = sparseTensor->getRank();
+  std::vector<uint64_t> perm(rank);
+  std::iota(perm.begin(), perm.end(), 0);
+  SparseTensorCOO<V> *coo = sparseTensor->toCOO(perm.data());
+
+  const std::vector<Element<V>> &elements = coo->getElements();
+  uint64_t nse = elements.size();
+
+  uint64_t *shape = new uint64_t[rank];
+  for (uint64_t i = 0; i < rank; i++)
+    shape[i] = coo->getSizes()[i];
+
+  V *values = new V[nse];
+  uint64_t *indices = new uint64_t[rank * nse];
+
+  for (uint64_t i = 0, base = 0; i < nse; i++) {
+    values[i] = elements[i].value;
+    for (uint64_t j = 0; j < rank; j++)
+      indices[base + j] = elements[i].indices[j];
+    base += rank;
+  }
+
+  delete coo;
+  *pRank = rank;
+  *pNse = nse;
+  *pShape = shape;
+  *pValues = values;
+  *pIndices = indices;
 }
 
 } // namespace
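Aside (not part of the patch): both new templates share a flat COO convention in which element `i` of a rank-`d` tensor stores its coordinates at `indices[i * d]` through `indices[i * d + d - 1]`, and `fromMLIRSparseTensor` hands back `new[]`-allocated arrays that the caller must `delete[]`. A minimal standalone sketch of that layout, reusing the 2x3 example documented further down in this file:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // A[0][0] = 1.0, A[1][1] = 5.0, A[1][2] = 3.0, as in the doc comment
  // on the convertToMLIRSparseTensor* entry points below.
  const uint64_t rank = 2, nse = 3;
  const uint64_t indices[rank * nse] = {0, 0, 1, 1, 1, 2};
  const double values[nse] = {1.0, 5.0, 3.0};
  for (uint64_t i = 0; i < nse; i++)
    std::printf("A[%llu][%llu] = %g\n",
                (unsigned long long)indices[i * rank + 0],
                (unsigned long long)indices[i * rank + 1], values[i]);
  return 0;
}
```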
@@ -873,17 +939,6 @@ extern "C" {
       cursor, values, filled, added, count);                                  \
   }
 
-#define IMPL_OUT(NAME, V)                                                     \
-  void NAME(void *tensor, void *dest, bool sort) {                            \
-    assert(tensor && dest);                                                   \
-    auto coo = static_cast<SparseTensorCOO<V> *>(tensor);                     \
-    if (sort)                                                                 \
-      coo->sort();                                                            \
-    char *filename = static_cast<char *>(dest);                               \
-    outSparseTensor<V>(*coo, filename);                                       \
-    delete coo;                                                               \
-  }
-
 // Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
 // can safely rewrite kIndex to kU64. We make this assertion to guarantee
 // that this file cannot get out of sync with its header.
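The assertion this comment refers to sits just outside the hunk; for readers following the diff, it plausibly takes a form like this sketch (not the verbatim source):

```cpp
// Sketch only: compile-time guard keeping this file in sync with the
// header that defines index_type.
static_assert(std::is_same<uint64_t, index_type>::value,
              "expected index_type == uint64_t");
```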
@@ -1048,39 +1103,49 @@ IMPL_GETNEXT(getNextI32, int32_t)
 IMPL_GETNEXT(getNextI16, int16_t)
 IMPL_GETNEXT(getNextI8, int8_t)
 
-/// Helper to insert elements in lexicographical index order, one per value
-/// type.
+/// Insert elements in lexicographical index order, one per value type.
 IMPL_LEXINSERT(lexInsertF64, double)
 IMPL_LEXINSERT(lexInsertF32, float)
 IMPL_LEXINSERT(lexInsertI64, int64_t)
 IMPL_LEXINSERT(lexInsertI32, int32_t)
 IMPL_LEXINSERT(lexInsertI16, int16_t)
 IMPL_LEXINSERT(lexInsertI8, int8_t)
 
-/// Helper to insert using expansion, one per value type.
+/// Insert using expansion, one per value type.
 IMPL_EXPINSERT(expInsertF64, double)
 IMPL_EXPINSERT(expInsertF32, float)
 IMPL_EXPINSERT(expInsertI64, int64_t)
 IMPL_EXPINSERT(expInsertI32, int32_t)
 IMPL_EXPINSERT(expInsertI16, int16_t)
 IMPL_EXPINSERT(expInsertI8, int8_t)
 
-/// Helper to output a sparse tensor, one per value type.
-IMPL_OUT(outSparseTensorF64, double)
-IMPL_OUT(outSparseTensorF32, float)
-IMPL_OUT(outSparseTensorI64, int64_t)
-IMPL_OUT(outSparseTensorI32, int32_t)
-IMPL_OUT(outSparseTensorI16, int16_t)
-IMPL_OUT(outSparseTensorI8, int8_t)
-
 #undef CASE
 #undef IMPL_SPARSEVALUES
 #undef IMPL_GETOVERHEAD
 #undef IMPL_ADDELT
 #undef IMPL_GETNEXT
 #undef IMPL_LEXINSERT
 #undef IMPL_EXPINSERT
-#undef IMPL_OUT
+
+/// Output a sparse tensor, one per value type.
+void outSparseTensorF64(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<double>(tensor, dest, sort);
+}
+void outSparseTensorF32(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<float>(tensor, dest, sort);
+}
+void outSparseTensorI64(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<int64_t>(tensor, dest, sort);
+}
+void outSparseTensorI32(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<int32_t>(tensor, dest, sort);
+}
+void outSparseTensorI16(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<int16_t>(tensor, dest, sort);
+}
+void outSparseTensorI8(void *tensor, void *dest, bool sort) {
+  return outSparseTensor<int8_t>(tensor, dest, sort);
+}
 
 //===----------------------------------------------------------------------===//
 //
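Replacing the IMPL_OUT macro with explicit wrapper functions keeps the exported symbols greppable while the template above carries the logic. A hypothetical caller-side sketch (assumes linking against this runtime; note the wrapper deletes the COO it is handed):

```cpp
#include <cstdint>

// Declaration matching the entry point above; normally it comes from
// the runtime's header.
extern "C" void outSparseTensorF64(void *tensor, void *dest, bool sort);

// Hypothetical helper: 'coo' is an opaque SparseTensorCOO<double> *
// previously obtained from this runtime.
void writeCOO(void *coo) {
  char filename[] = "tensor.tns"; // hypothetical output path
  // Sorts the elements, writes extended FROSTT format, and deletes the
  // COO, so 'coo' must not be used after this call.
  outSparseTensorF64(coo, filename, /*sort=*/true);
}
```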
@@ -1134,27 +1199,16 @@ void delSparseTensor(void *tensor) {
 ///     values = [1.0, 5.0, 3.0]
 ///     indices = [ 0, 0, 1, 1, 1, 2]
 //
-// TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
+// TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions
+// compressed
 //
-void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
-                                double *values, uint64_t *indices) {
-  // Setup all-dims compressed and default ordering.
-  std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed);
-  std::vector<uint64_t> perm(rank);
-  std::iota(perm.begin(), perm.end(), 0);
-  // Convert external format to internal COO.
-  SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO(
-      rank, shape, perm.data(), nse);
-  std::vector<uint64_t> idx(rank);
-  for (uint64_t i = 0, base = 0; i < nse; i++) {
-    for (uint64_t r = 0; r < rank; r++)
-      idx[r] = indices[base + r];
-    tensor->add(idx, values[i]);
-    base += rank;
-  }
-  // Return sparse tensor storage format as opaque pointer.
-  return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
-      rank, shape, perm.data(), sparse.data(), tensor);
+void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse, uint64_t *shape,
+                                   double *values, uint64_t *indices) {
+  return toMLIRSparseTensor<double>(rank, nse, shape, values, indices);
+}
+void *convertToMLIRSparseTensorF32(uint64_t rank, uint64_t nse, uint64_t *shape,
+                                   float *values, uint64_t *indices) {
+  return toMLIRSparseTensor<float>(rank, nse, shape, values, indices);
 }
 
 /// Converts a sparse tensor to COO-flavored format expressed using C-style
@@ -1174,41 +1228,18 @@ void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
 // SparseTensorCOO, then to the output. We may want to reduce the number of
 // copies.
 //
-// TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
+// TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions
+// compressed
 //
-void convertFromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
-                                 uint64_t **pShape, double **pValues,
-                                 uint64_t **pIndices) {
-  SparseTensorStorage<uint64_t, uint64_t, double> *sparseTensor =
-      static_cast<SparseTensorStorage<uint64_t, uint64_t, double> *>(tensor);
-  uint64_t rank = sparseTensor->getRank();
-  std::vector<uint64_t> perm(rank);
-  std::iota(perm.begin(), perm.end(), 0);
-  SparseTensorCOO<double> *coo = sparseTensor->toCOO(perm.data());
-
-  const std::vector<Element<double>> &elements = coo->getElements();
-  uint64_t nse = elements.size();
-
-  uint64_t *shape = new uint64_t[rank];
-  for (uint64_t i = 0; i < rank; i++)
-    shape[i] = coo->getSizes()[i];
-
-  double *values = new double[nse];
-  uint64_t *indices = new uint64_t[rank * nse];
-
-  for (uint64_t i = 0, base = 0; i < nse; i++) {
-    values[i] = elements[i].value;
-    for (uint64_t j = 0; j < rank; j++)
-      indices[base + j] = elements[i].indices[j];
-    base += rank;
-  }
-
-  delete coo;
-  *pRank = rank;
-  *pNse = nse;
-  *pShape = shape;
-  *pValues = values;
-  *pIndices = indices;
+void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
+                                    uint64_t *pNse, uint64_t **pShape,
+                                    double **pValues, uint64_t **pIndices) {
+  fromMLIRSparseTensor<double>(tensor, pRank, pNse, pShape, pValues, pIndices);
+}
+void convertFromMLIRSparseTensorF32(void *tensor, uint64_t *pRank,
+                                    uint64_t *pNse, uint64_t **pShape,
+                                    float **pValues, uint64_t **pIndices) {
+  fromMLIRSparseTensor<float>(tensor, pRank, pNse, pShape, pValues, pIndices);
 }
 
 } // extern "C"
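To see the new C API end to end, here is a hedged round-trip sketch: it restates the extern "C" signatures added above (normally provided by the runtime's header), assumes linking against this runtime library, and uses delSparseTensor from elsewhere in this file (visible in the hunk header above) to release the storage:

```cpp
#include <cstdint>

extern "C" {
void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse,
                                   uint64_t *shape, double *values,
                                   uint64_t *indices);
void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
                                    uint64_t *pNse, uint64_t **pShape,
                                    double **pValues, uint64_t **pIndices);
void delSparseTensor(void *tensor);
}

int main() {
  // The 2x3 example from the comments above.
  uint64_t shape[] = {2, 3};
  double values[] = {1.0, 5.0, 3.0};
  uint64_t indices[] = {0, 0, 1, 1, 1, 2};
  void *tensor = convertToMLIRSparseTensorF64(2, 3, shape, values, indices);

  uint64_t rank, nse, *outShape, *outIndices;
  double *outValues;
  convertFromMLIRSparseTensorF64(tensor, &rank, &nse, &outShape, &outValues,
                                 &outIndices);

  // fromMLIRSparseTensor allocates with new[]; the caller frees.
  delete[] outShape;
  delete[] outValues;
  delete[] outIndices;
  delSparseTensor(tensor); // the storage itself is not consumed above
  return 0;
}
```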