Skip to content

Commit

Permalink
Refactor lightning_gpu_utils unit tests to remove the dependency on s…
Browse files Browse the repository at this point in the history
…tatevector class (#675)

* Remove StateVector dependency for utils tests

* Auto update version

* update changelog

* add missing header file

* Trigger CIs

* Update .github/CHANGELOG.md

Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com>

---------

Co-authored-by: Dev version update bot <github-actions[bot]@users.noreply.github.com>
Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com>
  • Loading branch information
3 people committed Apr 12, 2024
1 parent c6a8260 commit 8ee3e00
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 36 deletions.
7 changes: 5 additions & 2 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
### New features since last release

* Add dynamic linking to LAPACK/OpenBlas shared objects in scipy.libs for both C++ and Python layer.
[(#651)](https://github.com/PennyLaneAI/pennylane-lightning/pull/651)
[(#653)](https://github.com/PennyLaneAI/pennylane-lightning/pull/653)

* `lightning.qubit` supports mid-circuit measurements.
[(#650)](https://github.com/PennyLaneAI/pennylane-lightning/pull/650)
Expand Down Expand Up @@ -36,7 +36,7 @@
### Breaking changes

* Deprecate static LAPACK linking support.
[(#651)](https://github.com/PennyLaneAI/pennylane-lightning/pull/651)
[(#653)](https://github.com/PennyLaneAI/pennylane-lightning/pull/653)

* Migrate `lightning.qubit` to the new device API.
[(#646)](https://github.com/PennyLaneAI/pennylane-lightning/pull/646)
Expand All @@ -46,6 +46,9 @@

### Improvements

* Refactor `lightning_gpu_utils` unit tests to remove the dependency on statevector class.
[(#675)](https://github.com/PennyLaneAI/pennylane-lightning/pull/675)

* Upgrade GitHub actions versions from v3 to v4.
[(#669)](https://github.com/PennyLaneAI/pennylane-lightning/pull/669)

Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.36.0-dev25"
__version__ = "0.36.0-dev26"
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,18 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <complex>
#include <cstdio>
#include <vector>

#include <catch2/catch.hpp>

#include "DataBuffer.hpp"
#include "LinearAlg.hpp"
#include "StateVectorCudaManaged.hpp"
#include "TestHelpers.hpp"
#include "Util.hpp" // exp2
#include "cuda_helpers.hpp"

/**
* @file
Expand All @@ -31,25 +33,34 @@
/// @cond DEV
namespace {
using namespace Pennylane::LightningGPU;
using namespace Pennylane::LightningGPU::Util;
using namespace Pennylane::Util;
} // namespace
/// @endcond

TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
double) {
using StateVectorT = StateVectorCudaManaged<TestType>;
using ComplexT = StateVectorT::ComplexT;
using CFP_t = StateVectorT::CFP_t;
using ComplexT = std::complex<TestType>;
using IdxT = typename std::conditional<std::is_same<TestType, float>::value,
int32_t, int64_t>::type;

using CFP_t =
typename std::conditional<std::is_same<TestType, float>::value,
cuFloatComplex, cuDoubleComplex>::type;

std::size_t num_qubits = 3;
std::size_t data_size = exp2(num_qubits);

std::vector<ComplexT> vectors = {{0.0, 0.0}, {0.0, 0.1}, {0.1, 0.1},
{0.1, 0.2}, {0.2, 0.2}, {0.3, 0.3},
{0.3, 0.4}, {0.4, 0.5}};

std::vector<CFP_t> vectors_cu;

std::transform(vectors.begin(), vectors.end(),
std::back_inserter(vectors_cu),
[](ComplexT x) { return complexToCu<ComplexT>(x); });

const std::vector<ComplexT> result_refs = {
{0.2, -0.1}, {-0.1, 0.2}, {0.2, 0.1}, {0.1, 0.2},
{0.7, -0.2}, {-0.1, 0.6}, {0.6, 0.1}, {0.2, 0.7}};
Expand All @@ -63,26 +74,26 @@ TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
{1.0, 0.0}, {0.0, -1.0}, {1.0, 0.0}, {0.0, 1.0},
{0.0, -1.0}, {1.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}};

StateVectorT sv_x{num_qubits};
StateVectorT sv_y{num_qubits};
DataBuffer<CFP_t> sv_x(data_size);
DataBuffer<CFP_t> sv_y(data_size);

sv_x.CopyHostDataToGpu(vectors.data(), vectors.size());
sv_x.CopyHostDataToGpu(vectors_cu.data(), vectors_cu.size());

SECTION("Testing sparse matrix vector product:") {
std::vector<ComplexT> result(data_size);
std::vector<CFP_t> result(data_size);
auto cusparsehandle = make_shared_cusparse_handle();

cuUtil::SparseMV_cuSparse<IdxT, TestType, CFP_t>(
SparseMV_cuSparse<IdxT, TestType, CFP_t>(
indptr.data(), static_cast<int64_t>(indptr.size()), indices.data(),
values.data(), static_cast<int64_t>(values.size()), sv_x.getData(),
sv_y.getData(), sv_x.getDataBuffer().getDevTag().getDeviceID(),
sv_x.getDataBuffer().getDevTag().getStreamID(),
sv_x.getCusparseHandle());
sv_y.getData(), sv_x.getDevice(), sv_x.getStream(),
cusparsehandle.get());

sv_y.CopyGpuDataToHost(result.data(), result.size());

for (std::size_t j = 0; j < exp2(num_qubits); j++) {
CHECK(imag(result[j]) == Approx(imag(result_refs[j])));
CHECK(real(result[j]) == Approx(real(result_refs[j])));
CHECK(result[j].x == Approx(real(result_refs[j])));
CHECK(result[j].y == Approx(imag(result_refs[j])));
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -11,35 +11,39 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <complex>
#include <cstdio>
#include <vector>

#include <catch2/catch.hpp>

#include "DataBuffer.hpp"
#include "MPILinearAlg.hpp"
#include "MPIManager.hpp"
#include "StateVectorCudaMPI.hpp"
#include "TestHelpers.hpp"
#include "Util.hpp" // exp2
#include "cuda_helpers.hpp"

/**
* @file
* Tests linear algebra functionality defined for the class StateVectorCudaMPI.
* Tests distributed linear algebra functionality.
*/

/// @cond DEV
namespace {
using namespace Pennylane::LightningGPU;
using namespace Pennylane::LightningGPU::Util;
using namespace Pennylane::Util;
} // namespace
/// @endcond

TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
double) {
using StateVectorT = StateVectorCudaMPI<TestType>;
using ComplexT = StateVectorT::ComplexT;
using CFP_t = StateVectorT::CFP_t;
using ComplexT = std::complex<TestType>;
using CFP_t =
typename std::conditional<std::is_same<TestType, float>::value,
cuFloatComplex, cuDoubleComplex>::type;
using IdxT = typename std::conditional<std::is_same<TestType, float>::value,
int32_t, int64_t>::type;

Expand All @@ -52,6 +56,11 @@ TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
{0.1, 0.2}, {0.2, 0.2}, {0.3, 0.3},
{0.3, 0.4}, {0.4, 0.5}};

std::vector<CFP_t> state_cu;

std::transform(state.begin(), state.end(), std::back_inserter(state_cu),
[](ComplexT x) { return complexToCu(x); });

std::vector<ComplexT> result_refs = {{0.2, -0.1}, {-0.1, 0.2}, {0.2, 0.1},
{0.1, 0.2}, {0.7, -0.2}, {-0.1, 0.6},
{0.6, 0.1}, {0.2, 0.7}};
Expand All @@ -65,7 +74,6 @@ TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
{1.0, 0.0}, {0.0, -1.0}, {1.0, 0.0}, {0.0, 1.0},
{0.0, -1.0}, {1.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}};

size_t mpi_buffersize = 1;
size_t nGlobalIndexBits =
std::bit_width(static_cast<size_t>(mpi_manager.getSize())) - 1;
size_t nLocalIndexBits = num_qubits - nGlobalIndexBits;
Expand All @@ -80,6 +88,12 @@ TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
subSvLength, 0);
mpi_manager.Barrier();

std::vector<CFP_t> local_state_cu;

std::transform(local_state.begin(), local_state.end(),
std::back_inserter(local_state_cu),
[](ComplexT x) { return complexToCu(x); });

int nDevices = 0;
cudaGetDeviceCount(&nDevices);
REQUIRE(nDevices >= 2);
Expand All @@ -89,29 +103,27 @@ TEMPLATE_TEST_CASE("Linear Algebra::SparseMV", "[Linear Algebra]", float,
mpi_manager.Barrier();

SECTION("Testing sparse matrix vector product:") {
std::vector<ComplexT> local_result(local_state.size());
std::vector<CFP_t> local_result(local_state.size());
auto cusparsehandle = make_shared_cusparse_handle();

DataBuffer<CFP_t> sv_x(local_state.size());
DataBuffer<CFP_t> sv_y(local_state.size());

StateVectorT sv_x(mpi_manager, dt_local, mpi_buffersize,
nGlobalIndexBits, nLocalIndexBits);
StateVectorT sv_y(mpi_manager, dt_local, mpi_buffersize,
nGlobalIndexBits, nLocalIndexBits);
sv_x.CopyHostDataToGpu(local_state, false);
sv_x.CopyHostDataToGpu(local_state_cu.data(), local_state_cu.size());

cuUtil::SparseMV_cuSparseMPI<IdxT, TestType, CFP_t>(
SparseMV_cuSparseMPI<IdxT, TestType, CFP_t>(
mpi_manager, sv_x.getLength(), indptr.data(),
static_cast<int64_t>(indptr.size()), indices.data(), values.data(),
sv_x.getData(), sv_y.getData(),
sv_x.getDataBuffer().getDevTag().getDeviceID(),
sv_x.getDataBuffer().getDevTag().getStreamID(),
sv_x.getCusparseHandle());
sv_x.getData(), sv_y.getData(), sv_x.getDevice(), sv_x.getStream(),
cusparsehandle.get());

mpi_manager.Barrier();

sv_y.CopyGpuDataToHost(local_result.data(), local_result.size());

for (std::size_t j = 0; j < local_result.size(); j++) {
CHECK(imag(local_result[j]) == Approx(imag(local_result_refs[j])));
CHECK(real(local_result[j]) == Approx(real(local_result_refs[j])));
CHECK(local_result[j].y == Approx(imag(local_result_refs[j])));
CHECK(local_result[j].x == Approx(real(local_result_refs[j])));
}
}
}

0 comments on commit 8ee3e00

Please sign in to comment.