Handling empty lists passed to Python kernels #1438

Merged · 6 commits · Mar 27, 2024
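For orientation, a minimal Python sketch of the behavior this PR enables, adapted from the new `test_empty_lists` test further down; the `cudaq.sample` call at the end is an extra illustration and an assumption on my part, not part of that test:

```python
import cudaq


@cudaq.kernel
def empty(var: list[cudaq.pauli_word], varvar: list[float],
          varvarvar: list[bool]):
    q = cudaq.qvector(2)
    x(q[0])


# Argument packing now consults the kernel's MLIR signature, so empty
# Python lists can be typed even though they carry no elements.
empty([], [], [])

# Assumption (not covered by the PR's tests): the same signature-aware
# packing backs the sample/observe/draw/get_state launch paths changed below.
counts = cudaq.sample(empty, [], [], [])
print(counts)
```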
1 change: 1 addition & 0 deletions python/cudaq/kernel/kernel_decorator.py
@@ -216,6 +216,7 @@ def __call__(self, *args):
argEleTy):
processedArgs.append([float(i) for i in arg])
mlirType = self.argTypes[i]
continue

if not cc.CallableType.isinstance(
mlirType) and mlirType != self.argTypes[i]:
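The `continue` added above lets an argument that was just coerced, such as a list of Python ints supplied for a `list[float]` parameter, skip the subsequent `mlirType != self.argTypes[i]` check. A hedged sketch of that case (the kernel name is illustrative; the suite's `test_list_float_pass_list_int` exercises the same path):

```python
import cudaq


@cudaq.kernel
def takes_floats(vals: list[float]):
    q = cudaq.qvector(2)
    # The integer elements below arrive here already coerced to floats.
    ry(vals[0], q[0])
    ry(vals[1], q[1])


# Without the added `continue`, an int list supplied for a list[float]
# parameter could still trip the follow-up mlirType comparison even though
# its elements were just converted.
takes_floats([0, 1])
```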
2 changes: 1 addition & 1 deletion python/runtime/cudaq/algorithms/py_draw.cpp
@@ -30,7 +30,7 @@ std::string pyDraw(py::object &kernel, py::args args) {
auto kernelName = kernel.attr("name").cast<std::string>();
auto kernelMod = kernel.attr("module").cast<MlirModule>();
args = simplifiedValidateInputArguments(args);
auto *argData = toOpaqueArgs(args);
auto *argData = toOpaqueArgs(args, kernelMod, kernelName);

return details::extractTrace([&]() mutable {
pyAltLaunchKernel(kernelName, kernelMod, *argData, {});
6 changes: 3 additions & 3 deletions python/runtime/cudaq/algorithms/py_observe_async.cpp
@@ -83,13 +83,14 @@ async_observe_result pyObserveAsync(py::object &kernel, spin_op &spin_operator,

auto &platform = cudaq::get_platform();
auto kernelName = kernel.attr("name").cast<std::string>();
auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto kernelFunc = getKernelFuncOp(kernelMod, kernelName);

// The provided kernel is a builder or MLIR kernel
auto *argData = new cudaq::OpaqueArguments();
args = simplifiedValidateInputArguments(args);
cudaq::packArgs(*argData, args,
cudaq::packArgs(*argData, args, kernelFunc,
[](OpaqueArguments &, py::object &) { return false; });
auto kernelMod = kernel.attr("module").cast<MlirModule>();

// Launch the asynchronous execution.
py::gil_scoped_release release;
@@ -110,7 +111,6 @@ observe_result pyObservePar(const PyParType &type, py::object &kernel,
kernel.attr("compile")();

// Ensure the user input is correct.
// auto validatedArgs = validateInputArguments(kernel, args);
auto &platform = cudaq::get_platform();
if (!platform.supports_task_distribution())
throw std::runtime_error(
9 changes: 6 additions & 3 deletions python/runtime/cudaq/algorithms/py_sample_async.cpp
@@ -51,14 +51,17 @@ for more information on this programming pattern.)#")
[&](py::object &kernel, py::args args, std::size_t shots,
std::size_t qpu_id) {
auto &platform = cudaq::get_platform();
auto kernelName = kernel.attr("name").cast<std::string>();
if (py::hasattr(kernel, "compile"))
kernel.attr("compile")();

auto kernelName = kernel.attr("name").cast<std::string>();
auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto kernelFunc = getKernelFuncOp(kernelMod, kernelName);

args = simplifiedValidateInputArguments(args);
auto *argData = new cudaq::OpaqueArguments();
cudaq::packArgs(*argData, args);
auto kernelMod = kernel.attr("module").cast<MlirModule>();
cudaq::packArgs(*argData, args, kernelFunc,
[](OpaqueArguments &, py::object &) { return false; });

// The function below will be executed multiple times
// if the kernel has conditional feedback. In that case,
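As a usage note, the async sampling binding above now packs arguments against the kernel's FuncOp. A hedged sketch of the Python call it serves, assuming the usual `shots_count`/`qpu_id` keyword names; the kernel mirrors `test_bool_list_elements` further down and its name is illustrative:

```python
import cudaq


@cudaq.kernel
def flip_if(flags: list[bool]):
    q = cudaq.qubit()
    if flags[0]:
        x(q)


# sample_async goes through the binding above; qpu_id selects the target QPU.
future = cudaq.sample_async(flip_if, [True], shots_count=100, qpu_id=0)
print(future.get())
```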
7 changes: 4 additions & 3 deletions python/runtime/cudaq/algorithms/py_state.cpp
@@ -44,7 +44,7 @@ state pyGetState(py::object kernel, py::args args) {
args = simplifiedValidateInputArguments(args);

auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto *argData = toOpaqueArgs(args);
auto *argData = toOpaqueArgs(args, kernelMod, kernelName);
return details::extractState([&]() mutable {
pyAltLaunchKernel(kernelName, kernelMod, *argData, {});
delete argData;
@@ -227,13 +227,14 @@ for more information on this programming pattern.)#")
kernel.attr("compile")();
auto &platform = cudaq::get_platform();
auto kernelName = kernel.attr("name").cast<std::string>();
auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto kernelFunc = getKernelFuncOp(kernelMod, kernelName);
args = simplifiedValidateInputArguments(args);

// The provided kernel is a builder or MLIR kernel
auto *argData = new cudaq::OpaqueArguments();
cudaq::packArgs(*argData, args,
cudaq::packArgs(*argData, args, kernelFunc,
[](OpaqueArguments &, py::object &) { return false; });
auto kernelMod = kernel.attr("module").cast<MlirModule>();

// Launch the asynchronous execution.
py::gil_scoped_release release;
8 changes: 4 additions & 4 deletions python/runtime/cudaq/algorithms/py_vqe.cpp
@@ -59,14 +59,14 @@ bool isArgumentStdVec(MlirModule &module, const std::string &kernelName,
observe_result pyObserve(py::object &kernel, spin_op &spin_operator,
py::args args, const int shots,
bool argMapperProvided = false) {
if (py::hasattr(kernel, "compile"))
kernel.attr("compile")();
auto kernelName = kernel.attr("name").cast<std::string>();
auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto &platform = cudaq::get_platform();
args = simplifiedValidateInputArguments(args);
auto *argData = toOpaqueArgs(args);
if (py::hasattr(kernel, "compile"))
kernel.attr("compile")();
auto *argData = toOpaqueArgs(args, kernelMod, kernelName);

auto kernelMod = kernel.attr("module").cast<MlirModule>();
auto numKernelArgs = getNumArguments(kernelMod, kernelName);
if (numKernelArgs == 0)
throw std::runtime_error(
69 changes: 44 additions & 25 deletions python/runtime/cudaq/platform/py_alt_launch_kernel.cpp
@@ -108,26 +108,40 @@ jitAndCreateArgs(const std::string &name, MlirModule module,
// We need to append the return type to the OpaqueArguments here
// so that we get a spot in the `rawArgs` memory for the
// altLaunchKernel function to dump the result
if (!isa<NoneType>(returnType)) {
if (returnType.isInteger(64)) {
py::args returnVal = py::make_tuple(py::int_(0));
packArgs(runtimeArgs, returnVal);
} else if (returnType.isInteger(1)) {
py::args returnVal = py::make_tuple(py::bool_(0));
packArgs(runtimeArgs, returnVal);
} else if (isa<FloatType>(returnType)) {
py::args returnVal = py::make_tuple(py::float_(0.0));
packArgs(runtimeArgs, returnVal);
} else {
std::string msg;
{
llvm::raw_string_ostream os(msg);
returnType.print(os);
}
throw std::runtime_error(
"Unsupported CUDA Quantum kernel return type - " + msg + ".\n");
}
}
if (!isa<NoneType>(returnType))
TypeSwitch<Type, void>(returnType)
.Case([&](IntegerType type) {
if (type.getIntOrFloatBitWidth() == 1) {
bool *ourAllocatedArg = new bool();
*ourAllocatedArg = 0;
runtimeArgs.emplace_back(ourAllocatedArg, [](void *ptr) {
delete static_cast<bool *>(ptr);
});
return;
}

long *ourAllocatedArg = new long();
*ourAllocatedArg = 0;
runtimeArgs.emplace_back(ourAllocatedArg, [](void *ptr) {
delete static_cast<long *>(ptr);
});
})
.Case([&](Float64Type type) {
double *ourAllocatedArg = new double();
*ourAllocatedArg = 0.;
runtimeArgs.emplace_back(ourAllocatedArg, [](void *ptr) {
delete static_cast<double *>(ptr);
});
})
.Default([](Type ty) {
std::string msg;
{
llvm::raw_string_ostream os(msg);
ty.print(os);
}
throw std::runtime_error(
"Unsupported CUDA Quantum kernel return type - " + msg + ".\n");
});

void *rawArgs = nullptr;
std::size_t size = 0;
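The rewritten block above allocates the return slot directly (bool, 64-bit integer, or double) instead of packing dummy Python objects. A hedged Python-side sketch of a kernel whose scalar return flows through this slot, assuming direct invocation returns the value as `test2() -> int` below suggests; the kernel name is illustrative:

```python
import cudaq


@cudaq.kernel
def ret_count() -> int:
    q = cudaq.qvector(2)
    x(q)  # broadcast X over the whole register
    return 2


# The launcher reserves a 64-bit integer slot for the returned value
# instead of packing a placeholder Python int.
value = ret_count()
print(value)  # expected: 2
```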
@@ -319,8 +333,10 @@ void bindAltLaunchKernel(py::module &mod) {
"pyAltLaunchKernel",
[&](const std::string &kernelName, MlirModule module,
py::args runtimeArgs, std::vector<std::string> callable_names) {
auto kernelFunc = getKernelFuncOp(module, kernelName);

cudaq::OpaqueArguments args;
cudaq::packArgs(args, runtimeArgs, callableArgHandler);
cudaq::packArgs(args, runtimeArgs, kernelFunc, callableArgHandler);
pyAltLaunchKernel(kernelName, module, args, callable_names);
},
py::arg("kernelName"), py::arg("module"), py::kw_only(),
@@ -329,8 +345,10 @@
"pyAltLaunchKernelR",
[&](const std::string &kernelName, MlirModule module, MlirType returnType,
py::args runtimeArgs, std::vector<std::string> callable_names) {
auto kernelFunc = getKernelFuncOp(module, kernelName);

cudaq::OpaqueArguments args;
cudaq::packArgs(args, runtimeArgs, callableArgHandler);
cudaq::packArgs(args, runtimeArgs, kernelFunc, callableArgHandler);
return pyAltLaunchKernelR(kernelName, module, returnType, args,
callable_names);
},
@@ -341,20 +359,21 @@
mod.def("synthesize", [](py::object kernel, py::args runtimeArgs) {
MlirModule module = kernel.attr("module").cast<MlirModule>();
auto name = kernel.attr("name").cast<std::string>();
auto kernelFuncOp = getKernelFuncOp(module, name);
cudaq::OpaqueArguments args;
cudaq::packArgs(args, runtimeArgs);
cudaq::packArgs(args, runtimeArgs, kernelFuncOp,
[](OpaqueArguments &, py::object &) { return false; });
return synthesizeKernel(name, module, args);
});

mod.def(
"get_qir",
[](py::object kernel, py::args runtimeArgs, std::string profile) {
[](py::object kernel, std::string profile) {
if (py::hasattr(kernel, "compile"))
kernel.attr("compile")();
MlirModule module = kernel.attr("module").cast<MlirModule>();
auto name = kernel.attr("name").cast<std::string>();
cudaq::OpaqueArguments args;
cudaq::packArgs(args, runtimeArgs);
return getQIRLL(name, module, args, profile);
},
py::arg("kernel"), py::kw_only(), py::arg("profile") = "");
27 changes: 16 additions & 11 deletions python/tests/kernel/test_kernel_features.py
@@ -201,9 +201,9 @@ def test_pauli_word_input():
1, 3, 3, -0.0454063, -0, 15
]
h = cudaq.SpinOperator(h2_data, 4)

@cudaq.kernel
def kernel(theta : float, var : cudaq.pauli_word):
def kernel(theta: float, var: cudaq.pauli_word):
q = cudaq.qvector(4)
x(q[0])
x(q[1])
@@ -215,17 +215,18 @@ def kernel(theta : float, var : cudaq.pauli_word):
want_exp = cudaq.observe(kernel, h, .11, 'XXXY').expectation()
assert np.isclose(want_exp, -1.13, atol=1e-2)

want_exp = cudaq.observe(kernel, h, .11, cudaq.pauli_word('XXXY')).expectation()
want_exp = cudaq.observe(kernel, h, .11,
cudaq.pauli_word('XXXY')).expectation()
assert np.isclose(want_exp, -1.13, atol=1e-2)

@cudaq.kernel
def test(theta : float, paulis: list[cudaq.pauli_word]):
def test(theta: float, paulis: list[cudaq.pauli_word]):
q = cudaq.qvector(4)
x(q[0])
x(q[1])
for p in paulis:
exp_pauli(theta, q, p)

print(test)
want_exp = cudaq.observe(test, h, .11, ['XXXY']).expectation()
assert np.isclose(want_exp, -1.13, atol=1e-2)
@@ -853,19 +854,20 @@ def test():

@skipIfPythonLessThan39
def test_bool_list_elements():

@cudaq.kernel
def kernel(var : list[bool]):
def kernel(var: list[bool]):
q = cudaq.qubit()
x(q)
if var[0]:
x(q)

counts = cudaq.sample(kernel, [False], shots_count=100)
assert '1' in counts and len(counts) == 1

counts = cudaq.sample(kernel, [True], shots_count=100)
assert '0' in counts and len(counts) == 1


def test_list_float_pass_list_int():

@@ -916,13 +918,16 @@ def test2() -> int:

@skipIfPythonLessThan39
def test_empty_lists():

@cudaq.kernel
def empty(var : list[cudaq.pauli_word], varvar : list[float], varvarvar :list[bool]):
def empty(var: list[cudaq.pauli_word], varvar: list[float],
varvarvar: list[bool]):
q = cudaq.qvector(2)
x(q[0])

empty([], [], [])


def test_no_valueerror_np_array():

@cudaq.kernel