diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 468782ebef4e34..b207fc1ee104d3 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -197,7 +197,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
                       const uint64_t *lvl2dim)
       : SparseTensorStorageBase(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
                                 dim2lvl, lvl2dim),
-        positions(lvlRank), coordinates(lvlRank), lvlCursor(lvlRank), coo() {}
+        positions(lvlRank), coordinates(lvlRank), lvlCursor(lvlRank) {}
 
 public:
   /// Constructs a sparse tensor with the given encoding, and allocates
@@ -337,16 +337,6 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
     }
   }
 
-  /// Allocates a new COO object and initializes it with the contents.
-  /// Callers must make sure to delete the COO when they're done with it.
-  SparseTensorCOO<V> *toCOO() {
-    std::vector<uint64_t> dimCoords(getDimRank());
-    coo = new SparseTensorCOO<V>(getDimSizes(), values.size());
-    toCOO(0, 0, dimCoords);
-    assert(coo->getElements().size() == values.size());
-    return coo;
-  }
-
   /// Sort the unordered tensor in place, the method assumes that it is
   /// an unordered COO tensor.
   void sortInPlace() {
@@ -556,58 +546,10 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
     return -1u;
   }
 
-  // Performs forall on level entries and inserts into dim COO.
-  void toCOO(uint64_t parentPos, uint64_t l, std::vector<uint64_t> &dimCoords) {
-    if (l == getLvlRank()) {
-      map.pushbackward(lvlCursor.data(), dimCoords.data());
-      assert(coo);
-      assert(parentPos < values.size());
-      coo->add(dimCoords, values[parentPos]);
-      return;
-    }
-    if (isCompressedLvl(l)) {
-      const std::vector<P> &positionsL = positions[l];
-      assert(parentPos + 1 < positionsL.size());
-      const uint64_t pstart = static_cast<uint64_t>(positionsL[parentPos]);
-      const uint64_t pstop = static_cast<uint64_t>(positionsL[parentPos + 1]);
-      const std::vector<C> &coordinatesL = coordinates[l];
-      assert(pstop <= coordinatesL.size());
-      for (uint64_t pos = pstart; pos < pstop; pos++) {
-        lvlCursor[l] = static_cast<uint64_t>(coordinatesL[pos]);
-        toCOO(pos, l + 1, dimCoords);
-      }
-    } else if (isLooseCompressedLvl(l)) {
-      const std::vector<P> &positionsL = positions[l];
-      assert(2 * parentPos + 1 < positionsL.size());
-      const uint64_t pstart = static_cast<uint64_t>(positionsL[2 * parentPos]);
-      const uint64_t pstop =
-          static_cast<uint64_t>(positionsL[2 * parentPos + 1]);
-      const std::vector<C> &coordinatesL = coordinates[l];
-      assert(pstop <= coordinatesL.size());
-      for (uint64_t pos = pstart; pos < pstop; pos++) {
-        lvlCursor[l] = static_cast<uint64_t>(coordinatesL[pos]);
-        toCOO(pos, l + 1, dimCoords);
-      }
-    } else if (isSingletonLvl(l) || isNOutOfMLvl(l)) {
-      assert(parentPos < coordinates[l].size());
-      lvlCursor[l] = static_cast<uint64_t>(coordinates[l][parentPos]);
-      toCOO(parentPos, l + 1, dimCoords);
-    } else { // Dense level.
-      assert(isDenseLvl(l));
-      const uint64_t sz = getLvlSizes()[l];
-      const uint64_t pstart = parentPos * sz;
-      for (uint64_t c = 0; c < sz; c++) {
-        lvlCursor[l] = c;
-        toCOO(pstart + c, l + 1, dimCoords);
-      }
-    }
-  }
-
   std::vector<std::vector<P>> positions;
   std::vector<std::vector<C>> coordinates;
   std::vector<V> values;
   std::vector<uint64_t> lvlCursor;
-  SparseTensorCOO<V> *coo;
 };
 
 //===----------------------------------------------------------------------===//
@@ -661,7 +603,6 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
     : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
                           dim2lvl, lvl2dim) {
   assert(!lvlCOO || lvlRank == lvlCOO->getRank());
-  coo = lvlCOO;
  // Provide hints on capacity of positions and coordinates.
  // TODO: needs much fine-tuning based on actual sparsity; currently
  // we reserve position/coordinate space based on all previous dense