Fix QPUD Library Mode Installer Build (#1435)
* Experiment with fixes for library mode for qpud in the installer

* Add %cpp_std to run commands

* Use make_copyable_function util

* Debug the missing symbol

* Revert "Debug the missing symbol"

This reverts commit 7e83939.

* Try something

* Trying a fix

* Remove obsolete code comments

* Adds strict C++17 checks

* Fixes for C++17 and unit tests

* Remove the workaround for cnot gate in C++17

* Debug test issues for installer

* Fixed issue with args by ref

* Revert "Debug test issues for installer"

This reverts commit 5e7bcbf.

* Set seed for test repeatability

* Fixes after merge

In this PR, we've re-enabled the C++17 tests for the installer build CI,
so the tests that use C++20 syntax need to be adjusted.

* Fixed a typo: REQURIES -> REQUIRES

* Address code review

Move C++17 tests into separate test files.
1tnguyen committed Apr 17, 2024
1 parent 6e0d50c commit 9df2fe6
Showing 27 changed files with 666 additions and 57 deletions.
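
Per the last item in the message, the C++17 variants of these tests now live in their own files, gated by lit feature requirements: each test declares the standard it needs, and the new %cpp_std substitution expands to the C++ standard flag the suite was configured with (presumably -std=c++17 for the installer build), so identical RUN lines serve both standards. A representative header from the new *-cpp17.cpp files below:

// REQUIRES: c++17
// REQUIRES: remote-sim

// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t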
5 changes: 4 additions & 1 deletion docker/build/assets.Dockerfile
@@ -169,7 +169,10 @@ RUN python3 -m ensurepip --upgrade && python3 -m pip install lit && \
dnf install -y --nobest --setopt=install_weak_deps=False file which
RUN cd /cuda-quantum && source scripts/configure_build.sh && \
"$LLVM_INSTALL_PREFIX/bin/llvm-lit" -v build/test \
--param nvqpp_site_config=build/test/lit.site.cfg.py
--param nvqpp_site_config=build/test/lit.site.cfg.py && \
"$LLVM_INSTALL_PREFIX/bin/llvm-lit" -v build/targettests \
--param nvqpp_site_config=build/targettests/lit.site.cfg.py


# Tests for the Python wheel are run post-installation.
COPY --from=python_build /wheelhouse /cuda_quantum/wheelhouse
14 changes: 6 additions & 8 deletions runtime/common/JIT.cpp
@@ -28,6 +28,8 @@
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include <cxxabi.h>

#define DEBUG_TYPE "cudaq-qpud"

namespace cudaq {

void invokeWrappedKernel(std::string_view irString,
@@ -79,14 +81,14 @@ void invokeWrappedKernel(std::string_view irString,
std::string demangledName(demangledPtr);
if (demangledName.rfind(wrappedKernelSymbol, 0) == 0 &&
demangledName.find(templatedTypeName) != std::string::npos) {
llvm::dbgs() << "Found symbol " << func.getName() << " for "
<< wrappedKernelSymbol;
LLVM_DEBUG(llvm::dbgs() << "Found symbol " << func.getName()
<< " for " << wrappedKernelSymbol);
mangledWrapper = func.getName().str();
fixUpLinkage(func);
}
if (demangledName.rfind(funcName, 0) == 0) {
llvm::dbgs() << "Found symbol " << func.getName() << " for "
<< funcName;
LLVM_DEBUG(llvm::dbgs() << "Found symbol " << func.getName()
<< " for " << funcName);
mangledKernel = func.getName().str();
fixUpLinkage(func);
}
@@ -109,10 +111,6 @@
return std::make_unique<llvm::SectionMemoryManager>();
});
llvm::Triple targetTriple(llvm::Twine(llvmModule->getTargetTriple()));
// IMPORTANT: need to setAutoClaimResponsibilityForObjectSymbols to true to
// prevent debug asserts about symbol responsibility.
objectLayer->setAutoClaimResponsibilityForObjectSymbols(true);

return objectLayer;
};

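A note on the logging change above: LLVM_DEBUG (from llvm/Support/Debug.h) compiles to nothing in NDEBUG builds and otherwise emits only when the file's DEBUG_TYPE is activated, e.g. with -debug-only=cudaq-qpud, so the previously unconditional llvm::dbgs() output no longer leaks into normal qpud runs. A minimal sketch of the pattern (not the actual JIT.cpp code):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

// DEBUG_TYPE must be defined before any use of LLVM_DEBUG in this file.
#define DEBUG_TYPE "cudaq-qpud"

static void reportSymbol(llvm::StringRef mangled, llvm::StringRef wanted) {
  // Printed only in assert-enabled builds with this debug type active
  // (e.g. `-debug-only=cudaq-qpud` or `-debug`); a no-op otherwise.
  LLVM_DEBUG(llvm::dbgs() << "Found symbol " << mangled << " for " << wanted
                          << "\n");
}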
28 changes: 20 additions & 8 deletions runtime/cudaq/algorithms/observe.h
@@ -502,10 +502,16 @@ auto observe_async(const std::size_t qpu_id, QuantumKernel &&kernel, spin_op &H,
H, platform, shots, kernelName, qpu_id);
#else
return details::runObservationAsync(
[&kernel,
args = std::forward_as_tuple(std::forward<Args>(args)...)]() mutable {
std::apply(std::move(kernel), std::move(args));
},
detail::make_copyable_function([&kernel,
args = std::make_tuple(std::forward<Args>(
args)...)]() mutable {
std::apply(
[&kernel](Args &&...args) {
return cudaq::invokeKernel(std::forward<QuantumKernel>(kernel),
std::forward<Args>(args)...);
},
std::move(args));
}),
H, platform, shots, kernelName, qpu_id);
#endif
}
@@ -535,10 +541,16 @@ auto observe_async(std::size_t shots, std::size_t qpu_id,
H, platform, shots, kernelName, qpu_id);
#else
return details::runObservationAsync(
[&kernel,
args = std::forward_as_tuple(std::forward<Args>(args)...)]() mutable {
std::apply(std::move(kernel), std::move(args));
},
detail::make_copyable_function([&kernel,
args = std::make_tuple(std::forward<Args>(
args)...)]() mutable {
std::apply(
[&kernel](Args &&...args) {
return cudaq::invokeKernel(std::forward<QuantumKernel>(kernel),
std::forward<Args>(args)...);
},
std::move(args));
}),
H, platform, shots, kernelName, qpu_id);
#endif
}
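Two fixes are combined in the observe.h hunks above. Capturing args = std::forward_as_tuple(...) stores a tuple of references into the caller's arguments, which dangle once the asynchronous task outlives the calling frame; std::make_tuple copies (or moves) the arguments into the closure instead. And because a closure that owns move-only argument values is itself move-only, it cannot be stored in a std::function, so it is wrapped with detail::make_copyable_function. A minimal sketch of what such a utility typically looks like (assumed here; the actual CUDA-Q implementation may differ):

#include <memory>
#include <type_traits>
#include <utility>

// Makes a move-only callable copyable by holding it behind a shared_ptr;
// all copies of the returned lambda share one underlying callable.
template <typename Callable>
auto make_copyable_function(Callable &&callable) {
  auto shared = std::make_shared<std::decay_t<Callable>>(
      std::forward<Callable>(callable));
  return [shared](auto &&...args) -> decltype(auto) {
    return (*shared)(std::forward<decltype(args)>(args)...);
  };
}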
28 changes: 20 additions & 8 deletions runtime/cudaq/algorithms/sample.h
@@ -385,10 +385,16 @@ async_sample_result sample_async(const std::size_t qpu_id,
platform, kernelName, shots, qpu_id);
#else
return details::runSamplingAsync(
[&kernel,
args = std::forward_as_tuple(std::forward<Args>(args)...)]() mutable {
std::apply(std::move(kernel), std::move(args));
},
detail::make_copyable_function([&kernel,
args = std::make_tuple(std::forward<Args>(
args)...)]() mutable {
std::apply(
[&kernel](Args &&...args) {
return cudaq::invokeKernel(std::forward<QuantumKernel>(kernel),
std::forward<Args>(args)...);
},
std::move(args));
}),
platform, kernelName, shots, qpu_id);
#endif
}
@@ -437,10 +443,16 @@ async_sample_result sample_async(std::size_t shots, std::size_t qpu_id,
platform, kernelName, shots, qpu_id);
#else
return details::runSamplingAsync(
[&kernel,
args = std::forward_as_tuple(std::forward<Args>(args)...)]() mutable {
std::apply(std::move(kernel), std::move(args));
},
detail::make_copyable_function([&kernel,
args = std::make_tuple(std::forward<Args>(
args)...)]() mutable {
std::apply(
[&kernel](Args &&...args) {
return cudaq::invokeKernel(std::forward<QuantumKernel>(kernel),
std::forward<Args>(args)...);
},
std::move(args));
}),
platform, kernelName, shots, qpu_id);
#endif
}
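The sample.h change mirrors the one in observe.h. A hedged illustration of the lifetime problem it addresses, using a hypothetical launch() helper:

#include <cudaq.h>

void ghz(std::size_t n) __qpu__ {
  cudaq::qvector q(n);
  h(q[0]);
  for (std::size_t i = 0; i + 1 < n; ++i)
    cx(q[i], q[i + 1]);
  mz(q);
}

cudaq::async_sample_result launch() {
  std::size_t n = 4; // local variable, gone once launch() returns
  // With the make_tuple capture, `n` is copied into the async task, so the
  // returned future stays valid after this frame unwinds; the old
  // forward_as_tuple capture would have kept a dangling reference to `n`.
  return cudaq::sample_async(/*qpu_id=*/0, ghz, n);
}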
7 changes: 2 additions & 5 deletions targettests/Remote-Sim/args_parsing.cpp
@@ -7,12 +7,9 @@
******************************************************************************/

// REQUIRES: remote-sim
// REQUIRES: c++20
// FIXME: https://github.com/NVIDIA/cuda-quantum/issues/1111

// clang-format off
// RUN: nvq++ --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

#include <cudaq.h>
4 changes: 2 additions & 2 deletions targettests/Remote-Sim/args_synthesis.cpp
@@ -8,8 +8,8 @@

// REQUIRES: remote-sim
// clang-format off
// RUN: nvq++ --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ --enable-mlir --no-aggressive-early-inline --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --no-aggressive-early-inline --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

// This is a comprehensive set of tests for kernel argument synthesis for remote
47 changes: 47 additions & 0 deletions targettests/Remote-Sim/free_func-cpp17.cpp
@@ -0,0 +1,47 @@
/*******************************************************************************
* Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/

// REQUIRES: c++17
// REQUIRES: remote-sim

// clang-format off
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

#include <cudaq.h>

void ghz(std::size_t N) __qpu__ {
cudaq::qvector q(N);
h(q[0]);
for (int i = 0; i < N - 1; i++) {
cx(q[i], q[i + 1]);
}
mz(q);
}

void ansatz(double theta) __qpu__ {
cudaq::qvector q(2);
x(q[0]);
ry(theta, q[1]);
cx(q[1], q[0]);
}

int main() {
auto counts = cudaq::sample(ghz, 10);
counts.dump();
assert(counts.size() == 2);
using namespace cudaq::spin;
cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) +
.21829 * z(0) - 6.125 * z(1);

double energy = cudaq::observe(ansatz, h, .59);
printf("Energy is %lf\n", energy);
assert(std::abs(energy + 1.748794) < 1e-3);
return 0;
}
5 changes: 2 additions & 3 deletions targettests/Remote-Sim/free_func.cpp
@@ -8,11 +8,10 @@

// REQUIRES: remote-sim
// REQUIRES: c++20
// FIXME: https://github.com/NVIDIA/cuda-quantum/issues/1111

// clang-format off
// RUN: nvq++ --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

#include <cudaq.h>
62 changes: 62 additions & 0 deletions targettests/Remote-Sim/observe-cpp17.cpp
@@ -0,0 +1,62 @@
/*******************************************************************************
* Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/

// REQUIRES: c++17
// REQUIRES: remote-sim

// clang-format off
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

#include <cudaq.h>
#include <cudaq/algorithm.h>
#include <cudaq/builder.h>
#include <cudaq/gradients.h>
#include <cudaq/optimizers.h>

struct ansatz {
auto operator()(double theta) __qpu__ {
cudaq::qvector q(2);
x(q[0]);
ry(theta, q[1]);
cx(q[1], q[0]);
}
};

int main() {
using namespace cudaq::spin;
cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) +
.21829 * z(0) - 6.125 * z(1);
{
// Simple `cudaq::observe` test
double energy = cudaq::observe(ansatz{}, h, .59);
printf("Energy is %lf\n", energy);
assert(std::abs(energy + 1.748794) < 1e-3);
}
{
// Full VQE test with gradients
auto argMapper = [&](std::vector<double> x) {
return std::make_tuple(x[0]);
};
cudaq::gradients::parameter_shift gradient(ansatz{}, argMapper);
gradient.shiftScalar = 1e-1;
cudaq::optimizers::lbfgs optimizer_lbfgs;
optimizer_lbfgs.max_line_search_trials = 10;
auto [opt_val, opt_params] = optimizer_lbfgs.optimize(
1, [&](const std::vector<double> &x, std::vector<double> &grad_vec) {
double e = cudaq::observe(ansatz{}, h, x[0]);
gradient.compute(x, grad_vec, h, e);
printf("<H>(%lf, %lf) = %lf\n", x[0], x[1], e);
return e;
});
printf("Optimal value = %.16lf\n", opt_val);
assert(std::abs(opt_val + 1.748794) < 1e-3);
}
return 0;
}
7 changes: 3 additions & 4 deletions targettests/Remote-Sim/observe.cpp
@@ -6,13 +6,12 @@
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/

// REQUIRES: c++20
// REQUIRES: remote-sim
// FIXME: https://github.com/NVIDIA/cuda-quantum/issues/1111
// REQUIRES: c++20

// clang-format off
// RUN: nvq++ --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 1 %s -o %t && %t
// clang-format on

#include <cudaq.h>
55 changes: 55 additions & 0 deletions targettests/Remote-Sim/observe_async-cpp17.cpp
@@ -0,0 +1,55 @@
/*******************************************************************************
* Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/

// REQUIRES: remote-sim
// REQUIRES: c++17

// clang-format off
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// clang-format on

#include <cudaq.h>

struct ansatz {
auto operator()(double theta) __qpu__ {
cudaq::qvector q(2);
x(q[0]);
ry(theta, q[1]);
cx(q[1], q[0]);
}
};

int main() {
using namespace cudaq::spin;
cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) +
.21829 * z(0) - 6.125 * z(1);
// Observe takes the kernel, the spin_op, and the concrete
// parameters for the kernel
auto energyFuture = cudaq::observe_async(/*qpu_id=*/0, ansatz{}, h, .59);
const double shift = 0.001;
auto plusFuture =
cudaq::observe_async(/*qpu_id=*/1, ansatz{}, h, .59 + shift);
auto minusFuture =
cudaq::observe_async(/*qpu_id=*/2, ansatz{}, h, .59 - shift);
const auto energy = energyFuture.get().expectation();
const double gradient =
(plusFuture.get().expectation() - minusFuture.get().expectation()) /
(2 * shift);
printf("Energy is %lf\n", energy);
printf("Gradient is %lf\n", gradient);
assert(std::abs(energy + 1.748794) < 1e-3);
  // Shots-based observe_async API
cudaq::set_random_seed(13);
auto energyFutureShots =
cudaq::observe_async(/*shots=*/8192, /*qpu_id=*/0, ansatz{}, h, .59);
const auto energyShots = energyFutureShots.get().expectation();
printf("Energy (shots) is %lf\n", energyShots);
assert(std::abs(energyShots + 1.748794) < 0.1);
return 0;
}
12 changes: 9 additions & 3 deletions targettests/Remote-Sim/observe_async.cpp
@@ -8,11 +8,10 @@

// REQUIRES: remote-sim
// REQUIRES: c++20
// FIXME: https://github.com/NVIDIA/cuda-quantum/issues/1111

// clang-format off
// RUN: nvq++ --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// RUN: nvq++ --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// RUN: nvq++ %cpp_std --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// RUN: nvq++ %cpp_std --enable-mlir --target remote-mqpu --remote-mqpu-auto-launch 3 %s -o %t && %t
// clang-format on

#include <cudaq.h>
@@ -45,5 +44,12 @@ int main() {
printf("Energy is %lf\n", energy);
printf("Gradient is %lf\n", gradient);
assert(std::abs(energy + 1.748794) < 1e-3);
// Shots-based observe_async API
cudaq::set_random_seed(13);
auto energyFutureShots =
cudaq::observe_async(/*shots=*/8192, /*qpu_id=*/0, ansatz{}, h, .59);
const auto energyShots = energyFutureShots.get().expectation();
printf("Energy (shots) is %lf\n", energyShots);
assert(std::abs(energyShots + 1.748794) < 0.1);
return 0;
}