From b2c4bafe5435ea85398a51a2f4d3074137b478c7 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 15:41:56 +0800 Subject: [PATCH 01/52] Bug fixes for batch execution --- pennylane/gradients/vjp.py | 3 ++- pennylane/interfaces/batch/autograd.py | 4 ++-- pennylane/interfaces/batch/tensorflow.py | 12 +++++++++--- pennylane/interfaces/batch/torch.py | 12 +++++++++--- pennylane/transforms/batch_transform.py | 10 ++++++++-- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/pennylane/gradients/vjp.py b/pennylane/gradients/vjp.py index 0dfa57f6878..fa886b687d6 100644 --- a/pennylane/gradients/vjp.py +++ b/pennylane/gradients/vjp.py @@ -318,7 +318,8 @@ def processing_fn(results): vjp_ = processing_fns[t_idx](res_t) if vjp_ is None: - vjps.append(None) + if reduction == "append": + vjps.append(None) continue if isinstance(reduction, str): diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index 9c3060a8c15..97a56bb13e8 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -101,7 +101,7 @@ def _execute( if the nth-order derivative is requested. Do not set this argument unless you understand the consequences! """ - with qml.tape.Unwrap(*tapes): + with qml.tape.Unwrap(*tapes, set_trainable=False): res, jacs = execute_fn(tapes, **gradient_kwargs) for i, r in enumerate(res): @@ -158,7 +158,7 @@ def grad_fn(dy): """Returns the vector-Jacobian product with given parameter values and output gradient dy""" - dy = dy[0] + dy = [qml.math.T(d) for d in dy[0]] jacs = ans[1] if jacs: diff --git a/pennylane/interfaces/batch/tensorflow.py b/pennylane/interfaces/batch/tensorflow.py index 3678a5cd1f7..1c4592dbfd3 100644 --- a/pennylane/interfaces/batch/tensorflow.py +++ b/pennylane/interfaces/batch/tensorflow.py @@ -65,9 +65,6 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d list[list[tf.Tensor]]: A nested list of tape results. Each element in the returned list corresponds in order to the provided tapes. """ - with qml.tape.Unwrap(*tapes): - # Forward pass: execute the tapes - res, jacs = execute_fn(tapes, **gradient_kwargs) parameters = [] params_unwrapped = [] @@ -75,6 +72,8 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d for i, tape in enumerate(tapes): # store the trainable parameters params = tape.get_parameters(trainable_only=False) + tape.trainable_params = qml.math.get_trainable_indices(params) + parameters += [p for i, p in enumerate(params) if i in tape.trainable_params] # store all unwrapped parameters @@ -82,6 +81,11 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d [i.numpy() if isinstance(i, (tf.Variable, tf.Tensor)) else i for i in params] ) + with qml.tape.Unwrap(*tapes, set_trainable=False): + # Forward pass: execute the tapes + res, jacs = execute_fn(tapes, **gradient_kwargs) + + for i, tape in enumerate(tapes): # convert output to TensorFlow tensors r = np.hstack(res[i]) if res[i].dtype == np.dtype("object") else res[i] res[i] = tf.convert_to_tensor(r) @@ -92,6 +96,8 @@ def grad_fn(*dy, **tfkwargs): """Returns the vector-Jacobian product with given parameter values and output gradient dy""" + dy = [qml.math.T(d) for d in dy] + if jacs: # Jacobians were computed on the forward pass (mode="forward") # No additional quantum evaluations needed; simply compute the VJPs directly. 
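A note on the ``reduction`` guard in the ``pennylane/gradients/vjp.py`` hunk at the top of this patch: the ``batch_vjp`` post-processing either ``append``s one VJP per tape or ``extend``s the output with per-parameter entries, so a ``None`` placeholder is only meaningful in ``append`` mode. A minimal sketch of the two modes follows; it is illustrative only, and ``reduce_vjps`` is an invented name, not the library implementation:

.. code-block:: python

    def reduce_vjps(per_tape_vjps, reduction="append"):
        out = []
        for vjp in per_tape_vjps:
            if vjp is None:
                if reduction == "append":
                    # placeholder keeps results aligned one-to-one with tapes
                    out.append(None)
                # in "extend" mode there is nothing to add for a tape
                # without trainable parameters, so it is skipped entirely
                continue
            getattr(out, reduction)(vjp)
        return out

With ``reduction="append"``, ``reduce_vjps([[0.1], None, [0.2, 0.3]])`` returns ``[[0.1], None, [0.2, 0.3]]``, while ``reduction="extend"`` flattens the same input to ``[0.1, 0.2, 0.3]``.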
diff --git a/pennylane/interfaces/batch/torch.py b/pennylane/interfaces/batch/torch.py index 151cdd591ba..b81d6d16c80 100644 --- a/pennylane/interfaces/batch/torch.py +++ b/pennylane/interfaces/batch/torch.py @@ -81,7 +81,7 @@ def forward(ctx, kwargs, *parameters): # pylint: disable=arguments-differ ctx.max_diff = kwargs["max_diff"] ctx._n = kwargs.get("_n", 1) - with qml.tape.Unwrap(*ctx.tapes): + with qml.tape.Unwrap(*ctx.tapes, set_trainable=False): res, ctx.jacs = ctx.execute_fn(ctx.tapes, **ctx.gradient_kwargs) # if any input tensor uses the GPU, the output should as well @@ -93,11 +93,17 @@ def forward(ctx, kwargs, *parameters): # pylint: disable=arguments-differ break for i, r in enumerate(res): - if r.dtype is np.dtype("object"): + if isinstance(r, np.ndarray) and r.dtype is np.dtype("object"): # For backwards compatibility, we flatten ragged tape outputs r = np.hstack(r) - res[i] = torch.as_tensor(r, device=ctx.torch_device) + if isinstance(r, (list, tuple)): + res[i] = [torch.as_tensor(t) for t in r] + + if isinstance(r, tuple): + res[i] = tuple(res[i]) + else: + res[i] = torch.as_tensor(r, device=ctx.torch_device) if ctx.jacs: ctx.jacs[i] = torch.as_tensor(ctx.jacs[i], device=ctx.torch_device) diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py index ba4a4730950..f17a2eb02d1 100644 --- a/pennylane/transforms/batch_transform.py +++ b/pennylane/transforms/batch_transform.py @@ -14,8 +14,11 @@ """Contains tools and decorators for registering batch transforms.""" # pylint: disable=too-few-public-methods import functools +import types +import warnings import pennylane as qml +from pennylane.new_qnode import QNode class batch_transform: @@ -160,6 +163,9 @@ def __init__(self, transform_fn, expand_fn=None, differentiable=True): self.differentiable = differentiable functools.update_wrapper(self, transform_fn) + def custom_qnode_wrapper(self, fn): + self.qnode_execution_wrapper = types.MethodType(fn, self) + def qnode_execution_wrapper(self, qnode, targs, tkwargs): """A wrapper method that takes a QNode and transform arguments, and returns a function that 'wraps' the QNode execution. 
@@ -177,7 +183,7 @@ def _wrapper(*args, **kwargs): interface = qnode.interface # TODO: extract gradient_fn from QNode - gradient_fn = qnode.diff_method + gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method) if interface is None or not self.differentiable: gradient_fn = None @@ -205,7 +211,7 @@ def __call__(self, qnode, *targs, **tkwargs): # tapes, fn = some_transform(tape, *transform_args) return self.construct(qnode, *targs, **tkwargs) - if isinstance(qnode, qml.QNode): + if isinstance(qnode, (qml.QNode, QNode, qml.ExpvalCost)): # Input is a QNode: # result = some_transform(qnode, *transform_args)(*qnode_args) wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) From 317514b9facad8a81957bcc90205d8cd65d931b0 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 15:43:53 +0800 Subject: [PATCH 02/52] more tests --- tests/interfaces/test_batch_autograd.py | 5 +++-- tests/interfaces/test_batch_tensorflow.py | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/interfaces/test_batch_autograd.py b/tests/interfaces/test_batch_autograd.py index 50f7cef6932..27f00b902c3 100644 --- a/tests/interfaces/test_batch_autograd.py +++ b/tests/interfaces/test_batch_autograd.py @@ -634,13 +634,14 @@ def cost(x, y, device): expected = np.array( [ - [[-np.sin(x) / 2, 0], [np.sin(x) / 2, 0]], + [[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]], [ - [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2], + [np.sin(x) / 2, 0], [np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2], ], ] ) + assert np.allclose(res, expected, atol=tol, rtol=0) def test_ragged_differentiation(self, execute_kwargs, tol): diff --git a/tests/interfaces/test_batch_tensorflow.py b/tests/interfaces/test_batch_tensorflow.py index 369dc48af59..9b2969f9b8e 100644 --- a/tests/interfaces/test_batch_tensorflow.py +++ b/tests/interfaces/test_batch_tensorflow.py @@ -600,12 +600,12 @@ def test_probability_differentiation(self, execute_kwargs, tol): expected = np.array( [ [ - [-tf.sin(x) / 2, -tf.sin(x) * tf.cos(y) / 2], - [tf.sin(x) / 2, tf.cos(y) * tf.sin(x) / 2], + [-tf.sin(x) / 2, tf.sin(x) / 2], + [-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2], ], [ - [0, -tf.cos(x) * tf.sin(y) / 2], - [0, tf.cos(x) * tf.sin(y) / 2], + [0, 0], + [-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2], ], ] ) From a937d4d2bb58757225a153cfe99dc71557eda1d1 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 16:06:44 +0800 Subject: [PATCH 03/52] more tests --- pennylane/gradients/gradient_transform.py | 4 +- pennylane/transforms/batch_transform.py | 59 ++++++++++++++++++++--- 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index d70ef25f5ef..a491a0268e6 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -141,12 +141,12 @@ def __init__(self, transform_fn, expand_fn=gradient_expand, differentiable=True, self.hybrid = hybrid super().__init__(transform_fn, expand_fn=expand_fn, differentiable=differentiable) - def qnode_execution_wrapper(self, qnode, targs, tkwargs): + def default_qnode_wrapper(self, qnode, targs, tkwargs): # Here, we overwrite the QNode execution wrapper in order # to take into account that classical processing may be present # inside the QNode. 
hybrid = tkwargs.pop("hybrid", self.hybrid) - _wrapper = super().qnode_execution_wrapper(qnode, targs, tkwargs) + _wrapper = super().default_qnode_wrapper(qnode, targs, tkwargs) cjac_fn = qml.transforms.classical_jacobian(qnode) def jacobian_wrapper(*args, **kwargs): diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py index f17a2eb02d1..e8290e5952d 100644 --- a/pennylane/transforms/batch_transform.py +++ b/pennylane/transforms/batch_transform.py @@ -15,10 +15,8 @@ # pylint: disable=too-few-public-methods import functools import types -import warnings import pennylane as qml -from pennylane.new_qnode import QNode class batch_transform: @@ -161,12 +159,59 @@ def __init__(self, transform_fn, expand_fn=None, differentiable=True): self.transform_fn = transform_fn self.expand_fn = expand_fn self.differentiable = differentiable + self.qnode_wrapper = self.default_qnode_wrapper functools.update_wrapper(self, transform_fn) def custom_qnode_wrapper(self, fn): - self.qnode_execution_wrapper = types.MethodType(fn, self) + """Register a custom QNode execution wrapper function + for the batch transform. + + **Example** + + .. code-block:: python + + def my_transform(tape, *targs, **tkwargs): + ... + return tapes, processing_fn + + @my_transform.custom_qnode_wrapper + def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): + def wrapper_fn(*args, **kwargs): + # construct QNode + qnode.construct(args, kwargs) + # apply transform to QNode's tapes + tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs) + # execute tapes and return processed result + ... + return processing_fn(results) + return wrapper_fn + + The custom QNode execution wrapper must have arguments + ``self`` (the batch transform object), ``qnode`` (the input QNode + to transform and execute), ``targs`` and ``tkwargs`` (the transform + arguments and keyword arguments respectively). + + It should return a callable object that accepts the *same* arguments + as the QNode, and returns the transformed numerical result. + + The default :meth:`~.default_qnode_wrapper` method may be called + if only post-processing is required: + + .. code-block:: python + + @my_transform.custom_qnode_wrapper + def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): + transformed_qnode = self.default_qnode_wrapper(qnode) + + def wrapper_fn(*args, **kwargs): + res = transformed_qnode(*args, **kwargs) + ... + return ... + return wrapper_fn + """ + self.qnode_wrapper = types.MethodType(fn, self) - def qnode_execution_wrapper(self, qnode, targs, tkwargs): + def default_qnode_wrapper(self, qnode, targs, tkwargs): """A wrapper method that takes a QNode and transform arguments, and returns a function that 'wraps' the QNode execution. 
@@ -211,10 +256,10 @@ def __call__(self, qnode, *targs, **tkwargs): # tapes, fn = some_transform(tape, *transform_args) return self.construct(qnode, *targs, **tkwargs) - if isinstance(qnode, (qml.QNode, QNode, qml.ExpvalCost)): + if isinstance(qnode, (qml.QNode, qml.ExpvalCost)): # Input is a QNode: # result = some_transform(qnode, *transform_args)(*qnode_args) - wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) + wrapper = self.qnode_wrapper(qnode, targs, tkwargs) wrapper = functools.wraps(qnode)(wrapper) else: @@ -236,7 +281,7 @@ def __call__(self, qnode, *targs, **tkwargs): targs = (qnode,) + targs def wrapper(qnode): - _wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) + _wrapper = self.qnode_wrapper(qnode, targs, tkwargs) _wrapper = functools.wraps(qnode)(_wrapper) return _wrapper From c6dc6294bd6de20df5cf79322da14c84f9f2757b Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 16:58:27 +0800 Subject: [PATCH 04/52] more tests --- pennylane/interfaces/batch/autograd.py | 1 - tests/interfaces/test_batch_autograd.py | 32 +++++++++++++++++++++++ tests/interfaces/test_batch_tensorflow.py | 30 +++++++++++++++++++++ tests/interfaces/test_batch_torch.py | 29 ++++++++++++++++++++ 4 files changed, 91 insertions(+), 1 deletion(-) diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index 97a56bb13e8..c5666906cf7 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -184,7 +184,6 @@ def grad_fn(dy): ) vjps = processing_fn(execute_fn(vjp_tapes)[0]) - else: vjp_tapes, processing_fn = qml.gradients.batch_vjp( tapes, dy, gradient_fn, reduction="append", gradient_kwargs=gradient_kwargs diff --git a/tests/interfaces/test_batch_autograd.py b/tests/interfaces/test_batch_autograd.py index 27f00b902c3..aca6ec91103 100644 --- a/tests/interfaces/test_batch_autograd.py +++ b/tests/interfaces/test_batch_autograd.py @@ -439,6 +439,38 @@ def cost(a, b, device): expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] assert np.allclose(res, expected, atol=tol, rtol=0) + def test_tape_no_parameters(self, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + + def cost(params): + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(np.array(0.5, requires_grad=False), wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + return sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + + params = np.array([0.1, 0.2], requires_grad=True) + x, y = params + + res = cost(params) + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res, expected, atol=tol, rtol=0) + + grad = qml.grad(cost)(params) + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = np.array(0.1, requires_grad=True) diff --git a/tests/interfaces/test_batch_tensorflow.py b/tests/interfaces/test_batch_tensorflow.py index 9b2969f9b8e..948d0a858f2 100644 --- a/tests/interfaces/test_batch_tensorflow.py +++ b/tests/interfaces/test_batch_tensorflow.py @@ -367,6 +367,36 @@ def test_jacobian(self, 
execute_kwargs, tol): expected = [[-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)]] assert np.allclose(expected, [agrad, bgrad], atol=tol, rtol=0) + def test_tape_no_parameters(self, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + params = tf.Variable([0.1, 0.2], dtype=tf.float64) + x, y = 1.0 * params + + with tf.GradientTape() as t: + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(0.5, wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res, expected, atol=tol, rtol=0) + + grad = t.gradient(res, params) + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = tf.Variable(0.1, dtype=tf.float64) diff --git a/tests/interfaces/test_batch_torch.py b/tests/interfaces/test_batch_torch.py index ba0e0ca41f7..483c1bbd4b0 100644 --- a/tests/interfaces/test_batch_torch.py +++ b/tests/interfaces/test_batch_torch.py @@ -439,6 +439,35 @@ def test_jacobian(self, torch_device, execute_kwargs, tol): assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0) assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0) + def test_tape_no_parameters(self, torch_device, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + params = torch.tensor([0.1, 0.2], requires_grad=True, device=torch_device) + x, y = params.detach() + + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(0.5, wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res.detach(), expected, atol=tol, rtol=0) + + res.backward() + grad = params.grad.detach() + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, torch_device, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = torch.tensor(0.1, requires_grad=True, device=torch_device) From 0a5756aef05f590aee4547cac479179d2fb572fb Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 17:14:57 +0800 Subject: [PATCH 05/52] more tests --- pennylane/transforms/batch_transform.py | 3 ++- tests/transforms/test_batch_transform.py | 34 ++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py index e8290e5952d..5c7999ce092 100644 --- a/pennylane/transforms/batch_transform.py +++ b/pennylane/transforms/batch_transform.py @@ -195,7 +195,7 @@ def wrapper_fn(*args, **kwargs): as the QNode, and returns the transformed numerical result. 
The default :meth:`~.default_qnode_wrapper` method may be called - if only post-processing is required: + if only pre- or post-processing dependent on QNode arguments is required: .. code-block:: python @@ -204,6 +204,7 @@ def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): transformed_qnode = self.default_qnode_wrapper(qnode) def wrapper_fn(*args, **kwargs): + args, kwargs = pre_process(args, kwargs) res = transformed_qnode(*args, **kwargs) ... return ... diff --git a/tests/transforms/test_batch_transform.py b/tests/transforms/test_batch_transform.py index f9498e46a46..01c14d5062a 100644 --- a/tests/transforms/test_batch_transform.py +++ b/tests/transforms/test_batch_transform.py @@ -235,6 +235,40 @@ def circuit(x): expected = fn(dev.batch_execute(tapes)) assert res == expected + def test_custom_qnode_wrapper(self, capsys): + """Test that the QNode execution wrapper can be overridden + if required.""" + a = 0.654 + x = 0.543 + + dev = qml.device("default.qubit", wires=2) + + @qml.batch_transform + def my_transform(tape, a): + tape1 = tape.copy() + tape2 = tape.copy() + return [tape1, tape2], lambda res: a * qml.math.sum(res) + + @my_transform.custom_qnode_wrapper + def qnode_wrapper(self, qnode, targs, tkwargs): + wrapper = self.default_qnode_wrapper(qnode, targs, tkwargs) + assert targs == (a,) + assert tkwargs == {} + print("custom wrapper called") + return wrapper + + @my_transform(a) + @qml.qnode(dev) + def circuit(x): + qml.Hadamard(wires=0) + qml.RX(x, wires=0) + return qml.expval(qml.PauliX(0)) + + circuit(x) + + captured = capsys.readouterr() + assert captured.out == "custom wrapper called\n" + @pytest.mark.parametrize("diff_method", ["parameter-shift", "backprop", "finite-diff"]) class TestBatchTransformGradients: From 490f106f94d346e5cef064bc897aff99d47972a2 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 17:18:30 +0800 Subject: [PATCH 06/52] more tests --- tests/interfaces/test_batch_torch.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/interfaces/test_batch_torch.py b/tests/interfaces/test_batch_torch.py index 483c1bbd4b0..2c9db85c427 100644 --- a/tests/interfaces/test_batch_torch.py +++ b/tests/interfaces/test_batch_torch.py @@ -789,6 +789,27 @@ def test_sampling(self, torch_device, execute_kwargs): assert res.shape == (2, 10) assert isinstance(res, torch.Tensor) + def test_sampling_expval(self, torch_device, execute_kwargs): + """Test sampling works as expected if combined with expectation values""" + if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": + pytest.skip("Adjoint differentiation does not support samples") + + dev = qml.device("default.qubit", wires=2, shots=10) + + with qml.tape.JacobianTape() as tape: + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + qml.sample(qml.PauliZ(0)) + qml.expval(qml.PauliX(1)) + + res = execute([tape], dev, **execute_kwargs)[0] + + assert len(res) == 2 + assert isinstance(res, tuple) + assert res[0].shape == (10,) + assert isinstance(res[0], torch.Tensor) + assert isinstance(res[1], torch.Tensor) + def test_sampling_gradient_error(self, torch_device, execute_kwargs): """Test differentiating a tape with sampling results in an error""" if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": From fa104917480fc58d696f4f5853d1fadeb33252f9 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 17:40:46 +0800 Subject: [PATCH 07/52] changelog --- .github/CHANGELOG.md | 1 + pennylane/interfaces/batch/autograd.py 
| 1 + 2 files changed, 2 insertions(+) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 222410636a0..cf461c0f8c6 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -273,6 +273,7 @@ [(#1549)](https://github.com/PennyLaneAI/pennylane/pull/1549) [(#1608)](https://github.com/PennyLaneAI/pennylane/pull/1608) [(#1618)](https://github.com/PennyLaneAI/pennylane/pull/1618) + [(#1637)](https://github.com/PennyLaneAI/pennylane/pull/1637) For example: diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index c5666906cf7..97a56bb13e8 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -184,6 +184,7 @@ def grad_fn(dy): ) vjps = processing_fn(execute_fn(vjp_tapes)[0]) + else: vjp_tapes, processing_fn = qml.gradients.batch_vjp( tapes, dy, gradient_fn, reduction="append", gradient_kwargs=gradient_kwargs From d70f0c83e13bf4d179c0933985f3eb6f967a8da7 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 18:00:31 +0800 Subject: [PATCH 08/52] Add metric tensor --- pennylane/math/utils.py | 4 +- pennylane/qnode.py | 4 + pennylane/transforms/__init__.py | 3 +- pennylane/transforms/metric_tensor.py | 292 ++++++++++----------- tests/gradients/test_gradient_transform.py | 2 +- tests/interfaces/test_qnode_autograd.py | 16 +- tests/interfaces/test_qnode_tf.py | 15 +- tests/interfaces/test_qnode_torch.py | 15 +- tests/math/test_functions.py | 3 +- tests/tape/test_unwrap.py | 2 +- tests/transforms/test_metric_tensor.py | 89 +++---- 11 files changed, 198 insertions(+), 247 deletions(-) diff --git a/pennylane/math/utils.py b/pennylane/math/utils.py index f66c375c7bf..585c930d3c8 100644 --- a/pennylane/math/utils.py +++ b/pennylane/math/utils.py @@ -268,9 +268,7 @@ def requires_grad(tensor, interface=None): if isinstance(tensor, ArrayBox): return True - # Currently, in the Autograd interface, we assume - # that all objects are differentiable by default. 
- return getattr(tensor, "requires_grad", True) + return getattr(tensor, "requires_grad", False) if interface == "torch": return getattr(tensor, "requires_grad", False) diff --git a/pennylane/qnode.py b/pennylane/qnode.py index c93ef9619af..352ae744cf0 100644 --- a/pennylane/qnode.py +++ b/pennylane/qnode.py @@ -617,6 +617,10 @@ def construct(self, args, kwargs): # provide the jacobian options self.qtape.jacobian_options = self.diff_options + if self.diff_options["method"] == "backprop": + params = self.qtape.get_parameters(trainable_only=False) + self.qtape.trainable_params = qml.math.get_trainable_indices(params) + def __call__(self, *args, **kwargs): # If shots specified in call but not in qfunc signature, diff --git a/pennylane/transforms/__init__.py b/pennylane/transforms/__init__.py index 986195e9502..f267b13961b 100644 --- a/pennylane/transforms/__init__.py +++ b/pennylane/transforms/__init__.py @@ -82,7 +82,6 @@ :toctree: api ~transforms.measurement_grouping - ~transforms.metric_tensor_tape ~transforms.hamiltonian_expand Decorators and utility functions @@ -111,7 +110,7 @@ from .hamiltonian_expand import hamiltonian_expand from .invisible import invisible from .measurement_grouping import measurement_grouping -from .metric_tensor import metric_tensor, metric_tensor_tape +from .metric_tensor import metric_tensor from .optimization import ( cancel_inverses, commute_controlled, diff --git a/pennylane/transforms/metric_tensor.py b/pennylane/transforms/metric_tensor.py index d84dd70163e..adb3fc011cb 100644 --- a/pennylane/transforms/metric_tensor.py +++ b/pennylane/transforms/metric_tensor.py @@ -14,104 +14,142 @@ """ Contains the metric tensor transform """ +import functools import warnings import numpy as np import pennylane as qml +from .batch_transform import batch_transform + + +SUPPORTED_OPS = ["RX", "RY", "RZ", "PhaseShift"] + + def _stopping_critera(obj): - if getattr(obj, "num_params", 0) == 0: - return True + return getattr(obj, "num_params", 0) == 0 or obj.name in SUPPORTED_OPS - if obj.name in ["RX", "RY", "RZ", "PhaseShift"]: - return True - return False +def expand_fn(tape): + new_tape = tape.expand(depth=2, stop_at=_stopping_critera) + params = new_tape.get_parameters(trainable_only=False) + new_tape.trainable_params = qml.math.get_trainable_indices(params) + return new_tape + + +@functools.partial(batch_transform, expand_fn=expand_fn) +def metric_tensor(tape, diag_approx=False, argnum=None): + """Returns a function that computes the block-diagonal approximation of the metric tensor + of a given QNode. + .. note:: -def metric_tensor_tape(tape, diag_approx=False, wrt=None): - """Returns a list of tapes, and a classical processing function, for computing the block - diagonal metric tensor approximation of an input tape on hardware. + Currently, only the :class:`~.RX`, :class:`~.RY`, :class:`~.RZ`, and + :class:`~.PhaseShift` parametrized gates are supported. + All other parametrized gates will be decomposed if possible. Args: - tape (.QuantumTape): the tape to compute the metric tensor of - diag_approx (bool): If ``True`` the diagonal approximation to the metric - tensor is computed. If ``False``, a block diagonal approximation - to the metric tensor is computed. - wrt (Sequence[int]): Indices of the tape parameters with which to - compute the metric tensor. Parameter indices not included are - treated as *fixed* parameters. Defaults to the tape's trainable - parameters. 
+ qnode (.QNode or .QuantumTape): quantum tape or QNode to find the metric tensor of + diag_approx (bool): iff True, use the diagonal approximation + argnum (int or list[int] or None): Trainable parameter indices to differentiate + with respect to. If not provided, the derivative with respect to all + trainable indices are returned. + hybrid (bool): Specifies whether classical processing inside a QNode + should be taken into account when transforming a QNode. + + - If ``True``, and classical processing is detected and this + option is set to ``True``, the Jacobian of the classical + processing will be computed and included. When evaluated, the + returned metric tensor will be with respect to the QNode arguments. + + - If ``False``, any internal QNode classical processing will be + **ignored**. When evaluated, the returned metric tensor will be with + respect to the **gate** arguments, and not the QNode arguments. Returns: - tuple[list[.QuantumTape], func]: Returns a tuple containing a list of - quantum tapes to be evaluated, and a function to be applied to these - tape results to compute the metric tensor. + func: Function which accepts the same arguments as the QNode. When called, this + function will return the metric tensor. **Example** - Given the following quantum tape, + Consider the following QNode: .. code-block:: python - with qml.tape.QuantumTape() as tape: + dev = qml.device("default.qubit", wires=3) + + @qml.qnode(dev, interface="autograd") + def circuit(weights): # layer 1 - qml.RX(0.1, wires=0) - qml.RX(0.2, wires=1) + qml.RX(weights[0, 0], wires=0) + qml.RX(weights[0, 1], wires=1) qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) # layer 2 - qml.RZ(0.4, wires=0) - qml.RZ(0.5, wires=2) + qml.RZ(weights[1, 0], wires=0) + qml.RZ(weights[1, 1], wires=2) qml.CNOT(wires=[0, 1]) qml.CNOT(wires=[1, 2]) + return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)), qml.expval(qml.PauliY(2)) - qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - qml.expval(qml.PauliY(2)) - - We can use the ``metric_tensor`` transform to generate a new tapes and a classical - processing function for computing the metric tensor. - - >>> mt_tapes, fn = qml.transforms.metric_tensor_tape(tape) - >>> print(mt_tapes) - [, ] - >>> print(mt_tapes[0].draw()) - 0: ──H──╭┤ Probs - 1: ──H──├┤ Probs - 2: ─────╰┤ Probs - >>> print(mt_tapes[1].draw()) - 0: ──RX(0.1)──╭C──────╭┤ Probs - 1: ──RX(0.2)──╰X──╭C──├┤ Probs - 2: ───────────────╰X──╰┤ Probs - - We can evaluate these tapes on a device: - - >>> dev = qml.device("default.qubit", wires=3) - >>> res = dev.batch_execute(mt_tapes) - >>> print(res) - [array([[0.25, 0. , 0.25, 0. , 0.25, 0. , 0.25, 0. ]]), - array([[9.87560268e-01, 0.00000000e+00, 0.00000000e+00, 9.94181506e-03, - 2.48960206e-05, 0.00000000e+00, 0.00000000e+00, 2.47302134e-03]])] - - Applying the processing function results in the metric tensor: - - >>> fn(res) - array([[0.25 , 0. , 0. , 0. ], - [0. , 0.25 , 0. , 0. ], - [0. , 0. , 0.00249168, 0.00244201], - [0. , 0. , 0.00244201, 0.01226071]]) - """ + We can use the ``metric_tensor`` function to generate a new function, that returns the + metric tensor of this QNode: - # For parametrized operations, only the RX, RY, RZ, and PhaseShift gates are supported. - # Expand out all other gates. - tape = tape.expand(depth=2, stop_at=_stopping_critera) + >>> met_fn = qml.metric_tensor(circuit) + >>> weights = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], requires_grad=True) + >>> met_fn(weights) + tensor([[0.25 , 0. , 0. , 0. ], + [0. , 0.25 , 0. , 0. ], + [0. , 0. 
, 0.0025, 0.0024], + [0. , 0. , 0.0024, 0.0123]], requires_grad=True) - if wrt is not None: - tape.trainable_params = set(wrt) + The returned metric tensor is also fully differentiable, in all interfaces. + For example, differentiating the ``(3, 2)`` element: + + >>> grad_fn = qml.grad(lambda x: met_fn(x)[3, 2]) + >>> grad_fn(weights) + array([[ 0.04867729, -0.00049502, 0. ], + [ 0. , 0. , 0. ]]) + + .. UsageDetails:: + + This transform can also be applied to low-level + :class:`~.QuantumTape` objects. This will result in no implicit quantum + device evaluation. Instead, the processed tapes, and post-processing + function, which together define the gradient are directly returned: + + >>> params = np.array([1.7, 1.0, 0.5], requires_grad=True) + >>> with qml.tape.QuantumTape() as tape: + ... qml.RX(params[0], wires=0) + ... qml.RY(params[1], wires=0) + ... qml.CNOT(wires=[0, 1]) + ... qml.PhaseShift(params[2], wires=1) + ... qml.expval(qml.PauliX(0)) + >>> tapes, fn = qml.metric_tensor(tape) + >>> tapes + [, + , + ] + + This can be useful if the underlying circuits representing the gradient + computation need to be analyzed. + + The output tapes can then be evaluated and post-processed to retrieve + the gradient: + + >>> dev = qml.device("default.qubit", wires=2) + >>> fn(qml.execute(tapes, dev, None)) + array([[0.25 , 0. , 0. ], + [0. , 0.00415023, 0. ], + [0. , 0. , 0.24878844]]) + """ + + if argnum is not None: + tape.trainable_params = set(argnum) # get the circuit graph graph = tape.graph @@ -152,7 +190,7 @@ def metric_tensor_tape(tape, diag_approx=False, wrt=None): # to measure in the basis of the parametrized layer generators. with tape.__class__() as layer_tape: for op in queue: - op.queue() + qml.apply(op) for o in obs_list[-1]: o.diagonalizing_gates() @@ -177,71 +215,14 @@ def processing_fn(probs): return metric_tensor_tapes, processing_fn -def metric_tensor(qnode, diag_approx=False, only_construct=False): - """Returns a function that returns the value of the metric tensor - of a given QNode. - - .. note:: - - Currently, only the :class:`~.RX`, :class:`~.RY`, :class:`~.RZ`, and - :class:`~.PhaseShift` parametrized gates are supported. - All other parametrized gates will be decomposed if possible. - - Args: - qnode (.QNode or .ExpvalCost): QNode(s) to compute the metric tensor of - diag_approx (bool): iff True, use the diagonal approximation - only_construct (bool): Iff True, construct the circuits used for computing - the metric tensor but do not execute them, and return the tapes. - - Returns: - func: Function which accepts the same arguments as the QNode. When called, this - function will return the metric tensor. - - **Example** - - Consider the following QNode: - - .. 
code-block:: python - - dev = qml.device("default.qubit", wires=3) - - @qml.qnode(dev, interface="autograd") - def circuit(weights): - # layer 1 - qml.RX(weights[0, 0], wires=0) - qml.RX(weights[0, 1], wires=1) - - qml.CNOT(wires=[0, 1]) - qml.CNOT(wires=[1, 2]) - - # layer 2 - qml.RZ(weights[1, 0], wires=0) - qml.RZ(weights[1, 1], wires=2) - - qml.CNOT(wires=[0, 1]) - qml.CNOT(wires=[1, 2]) - return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)), qml.expval(qml.PauliY(2)) - - We can use the ``metric_tensor`` function to generate a new function, that returns the - metric tensor of this QNode: +@metric_tensor.custom_qnode_wrapper +def qnode_execution_wrapper(self, qnode, targs, tkwargs): + # Here, we overwrite the QNode execution wrapper in order + # to take into account that classical processing may be present + # inside the QNode. + hybrid = tkwargs.pop("hybrid", True) - >>> met_fn = qml.metric_tensor(circuit) - >>> weights = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], requires_grad=True) - >>> met_fn(weights) - tensor([[0.25 , 0. , 0. , 0. ], - [0. , 0.25 , 0. , 0. ], - [0. , 0. , 0.0025, 0.0024], - [0. , 0. , 0.0024, 0.0123]], requires_grad=True) - - The returned metric tensor is also fully differentiable, in all interfaces. - For example, differentiating the ``(3, 2)`` element: - - >>> grad_fn = qml.grad(lambda x: met_fn(x)[3, 2]) - >>> grad_fn(weights) - array([[ 0.04867729, -0.00049502, 0. ], - [ 0. , 0. , 0. ]]) - """ - if qnode.__class__.__name__ == "ExpvalCost": + if isinstance(qnode, qml.ExpvalCost): if qnode._multiple_devices: # pylint: disable=protected-access warnings.warn( "ExpvalCost was instantiated with multiple devices. Only the first device " @@ -250,30 +231,43 @@ def circuit(weights): qnode = qnode.qnodes.qnodes[0] - def _metric_tensor_fn(*args, **kwargs): - jac = qml.math.stack(qml.transforms.classical_jacobian(qnode)(*args, **kwargs)) - jac = qml.math.reshape(jac, [qnode.qtape.num_params, -1]) + mt_fn = self.default_qnode_wrapper(qnode, targs, tkwargs) + cjac_fn = qml.transforms.classical_jacobian(qnode) + + def wrapper(*args, **kwargs): + mt = mt_fn(*args, **kwargs) - wrt, perm = np.nonzero(qml.math.toarray(jac)) - perm = np.argsort(np.argsort(perm)) + if not hybrid: + return mt + + cjac = cjac_fn(*args, **kwargs) + + if isinstance(cjac, tuple): + if len(cjac) == 1: + cjac = cjac[0] + else: + # Classical processing of multiple arguments is present. Return mt @ cjac. + metric_tensors = [] - qnode.construct(args, kwargs) + for c in cjac: + if c is not None: + _mt = qml.math.tensordot(mt, c, [[-1], [0]]) + _mt = qml.math.tensordot(c, _mt, [[0], [0]]) + metric_tensors.append(_mt) - metric_tensor_tapes, processing_fn = metric_tensor_tape( - qnode.qtape, - diag_approx=diag_approx, - wrt=wrt.tolist() if qnode.diff_options["method"] == "backprop" else None, - ) + return metric_tensors - if only_construct: - return metric_tensor_tapes + is_square = cjac.shape == (1,) or (cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]) - res = [t.execute(device=qnode.device) for t in metric_tensor_tapes] - mt = processing_fn(res) + if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])): + # Classical Jacobian is the identity. No classical processing + # is present inside the QNode. + return mt - # permute rows ad columns - mt = qml.math.gather(mt, perm) - mt = qml.math.gather(qml.math.T(mt), perm) + # Classical processing of a single argument is present. Return mt @ cjac. 
+ cjac = qml.math.convert_like(cjac, mt) + mt = qml.math.tensordot(mt, cjac, [[-1], [0]]) + mt = qml.math.tensordot(cjac, mt, [[0], [0]]) return mt - return _metric_tensor_fn + return wrapper diff --git a/tests/gradients/test_gradient_transform.py b/tests/gradients/test_gradient_transform.py index 260446f4fe9..5b84db167d1 100644 --- a/tests/gradients/test_gradient_transform.py +++ b/tests/gradients/test_gradient_transform.py @@ -270,7 +270,7 @@ def circuit(data, weights): res = qml.gradients.param_shift(circuit)(d, w) classical_jac = spy.spy_return(d, w) - assert np.allclose(classical_jac, np.array([[0, 2 * w[0], 0], [0, 0, 1]]).T) + assert np.allclose(classical_jac, np.array([[2 * w[0], 0], [0, 1]]).T) expected = np.array([-2 * x * np.cos(np.cos(d)) * np.sin(x ** 2), 0]) assert np.allclose(res, expected, atol=tol, rtol=0) diff --git a/tests/interfaces/test_qnode_autograd.py b/tests/interfaces/test_qnode_autograd.py index df8f94e5173..c4d81d054b8 100644 --- a/tests/interfaces/test_qnode_autograd.py +++ b/tests/interfaces/test_qnode_autograd.py @@ -423,22 +423,10 @@ def circuit(a, p): return qml.expval(qml.PauliX(0)) res = circuit(a, p) - - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - # In backprop mode, all parameters are returned. - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * ( np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2]) diff --git a/tests/interfaces/test_qnode_tf.py b/tests/interfaces/test_qnode_tf.py index c674c3b5c43..ff728a49a6e 100644 --- a/tests/interfaces/test_qnode_tf.py +++ b/tests/interfaces/test_qnode_tf.py @@ -447,20 +447,9 @@ def circuit(a, p): with tf.GradientTape() as tape: res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. 
- assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * ( tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2]) diff --git a/tests/interfaces/test_qnode_torch.py b/tests/interfaces/test_qnode_torch.py index f0ca8ce7a9f..7a0bc1ec917 100644 --- a/tests/interfaces/test_qnode_torch.py +++ b/tests/interfaces/test_qnode_torch.py @@ -458,20 +458,9 @@ def circuit(a, p): res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * ( np.cos(p_val[2]) * np.sin(p_val[1]) diff --git a/tests/math/test_functions.py b/tests/math/test_functions.py index 4304b3c6960..bf8286508c7 100644 --- a/tests/math/test_functions.py +++ b/tests/math/test_functions.py @@ -1610,8 +1610,7 @@ def cost_fn(params): values = [[0.1, 0.2], np.tensor(0.1, requires_grad=True), np.tensor([0.5, 0.2])] cost_fn(values) - # Currently, we assume *all* objects are trainable by default in Autograd - assert res == {0, 1, 2} + assert res == {1, 2} def test_autograd_unwrapping_backward(self): """Test that the trainability indices of a sequence of Autograd arrays diff --git a/tests/tape/test_unwrap.py b/tests/tape/test_unwrap.py index d6a8a947a05..f6cf1f33f63 100644 --- a/tests/tape/test_unwrap.py +++ b/tests/tape/test_unwrap.py @@ -99,7 +99,7 @@ def test_unwrap_autograd(): params = tape.get_parameters(trainable_only=False) assert all(isinstance(i, float) for i in params) assert np.allclose(params, [0.1, 0.2, 0.5, 0.3]) - assert tape.trainable_params == {0, 2, 3} + assert tape.trainable_params == {0, 3} # outside the context, the original parameters have been restored. assert tape.get_parameters(trainable_only=False) == p diff --git a/tests/transforms/test_metric_tensor.py b/tests/transforms/test_metric_tensor.py index 56eac898769..720c9d02875 100644 --- a/tests/transforms/test_metric_tensor.py +++ b/tests/transforms/test_metric_tensor.py @@ -15,10 +15,11 @@ Unit tests for the metric tensor transform. 
""" import pytest -import numpy as np +from pennylane import numpy as np from scipy.linalg import block_diag import pennylane as qml +from pennylane import QNode, qnode from gate_data import Y, Z @@ -29,14 +30,13 @@ class TestMetricTensor: def test_rot_decomposition(self, diff_method): """Test that the rotation gate is correctly decomposed""" dev = qml.device("default.qubit", wires=1) + params = np.array([1.0, 2.0, 3.0], requires_grad=True) - def circuit(weights): - qml.Rot(weights[0], weights[1], weights[2], wires=0) - return qml.expval(qml.PauliX(0)) + with qml.tape.QuantumTape() as circuit: + qml.Rot(params[0], params[1], params[2], wires=0) + qml.expval(qml.PauliX(0)) - circuit = qml.QNode(circuit, dev, diff_method=diff_method) - params = np.array([1.0, 2.0, 3.0]) - tapes = qml.metric_tensor(circuit, only_construct=True)(params) + tapes, _ = qml.metric_tensor(circuit) assert len(tapes) == 3 # first parameter subcircuit @@ -58,9 +58,6 @@ def circuit(weights): assert tapes[2].operations[0].data == [1] assert tapes[2].operations[1].data == [2] - result = qml.metric_tensor(circuit)(params) - assert result.shape == (3, 3) - @pytest.mark.parametrize("diff_method", ["parameter-shift", "backprop"]) def test_multirz_decomposition(self, diff_method): """Test that the MultiRZ gate is correctly decomposed""" @@ -71,7 +68,7 @@ def circuit(a, b): qml.MultiRZ(b, wires=[0, 1, 2]) return qml.expval(qml.PauliX(0)) - circuit = qml.QNode(circuit, dev, diff_method=diff_method) + circuit = QNode(circuit, dev, diff_method=diff_method) params = [0.1, 0.2] result = qml.metric_tensor(circuit)(*params) assert result.shape == (2, 2) @@ -90,41 +87,36 @@ def circuit(a): qml.RX(a, wires=0) return qml.expval(qml.PauliX(0)) - circuit = qml.QNode(circuit, dev, diff_method=diff_method) + circuit = QNode(circuit, dev, diff_method=diff_method) params = [0.1] - result = qml.metric_tensor(circuit)(*params) + result = qml.metric_tensor(circuit, hybrid=False)(*params) assert result.shape == (2, 2) def test_generator_no_expval(self, monkeypatch): """Test exception is raised if subcircuit contains an operation with generator object that is not an observable""" - dev = qml.device("default.qubit", wires=1) - - def circuit(a): - qml.RX(a, wires=0) - return qml.expval(qml.PauliX(0)) - - circuit = qml.QNode(circuit, dev) - with monkeypatch.context() as m: m.setattr("pennylane.RX.generator", [qml.RX, 1]) + with qml.tape.QuantumTape() as tape: + qml.RX(np.array(0.5, requires_grad=True), wires=0) + qml.expval(qml.PauliX(0)) + with pytest.raises(qml.QuantumFunctionError, match="no corresponding observable"): - circuit.metric_tensor(1.0, only_construct=True) + qml.metric_tensor(tape) def test_construct_subcircuit(self): """Test correct subcircuits constructed""" dev = qml.device("default.qubit", wires=2) - def circuit(a, b, c): - qml.RX(a, wires=0) - qml.RY(b, wires=0) + with qml.tape.QuantumTape() as tape: + qml.RX(np.array(1.0, requires_grad=True), wires=0) + qml.RY(np.array(1.0, requires_grad=True), wires=0) qml.CNOT(wires=[0, 1]) - qml.PhaseShift(c, wires=1) + qml.PhaseShift(np.array(1.0, requires_grad=True), wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) - circuit = qml.QNode(circuit, dev) - tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True) + tapes, _ = qml.metric_tensor(tape) assert len(tapes) == 3 # first parameter subcircuit @@ -151,8 +143,9 @@ def test_construct_subcircuit_layers(self): """Test correct subcircuits constructed when a layer structure exists""" dev = qml.device("default.qubit", 
wires=3) + params = np.ones([8]) - def circuit(params): + with qml.tape.QuantumTape() as tape: # section 1 qml.RX(params[0], wires=0) # section 2 @@ -173,10 +166,7 @@ def circuit(params): qml.CNOT(wires=[1, 2]) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2)) - circuit = qml.QNode(circuit, dev) - - params = np.ones([8]) - tapes = circuit.metric_tensor(params, only_construct=True) + tapes, _ = qml.metric_tensor(tape) # this circuit should split into 4 independent # sections or layers when constructing subcircuits @@ -236,14 +226,14 @@ def circuit(a, b, c): qml.PhaseShift(c, wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) - circuit = qml.QNode(circuit, dev) + circuit = QNode(circuit, dev) a = 0.432 b = 0.12 c = -0.432 # evaluate metric tensor - g = circuit.metric_tensor(a, b, c) + g = qml.metric_tensor(circuit)(a, b, c) # check that the metric tensor is correct expected = ( @@ -287,7 +277,7 @@ def final(x, y, z, h, g, f): qml.RX(h, wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2)) - final = qml.QNode(final, dev, diff_method=request.param) + final = QNode(final, dev, diff_method=request.param) return dev, final, non_parametrized_layer, a, b, c @@ -300,7 +290,7 @@ def test_evaluate_block_diag_metric_tensor(self, sample_circuit, tol): params = [-0.282203, 0.145554, 0.331624, -0.163907, 0.57662, 0.081272] x, y, z, h, g, f = params - G = circuit.metric_tensor(*params) + G = qml.metric_tensor(circuit)(*params) # ============================================ # Test block diag metric tensor of first layer is correct. @@ -366,7 +356,7 @@ def layer2_diag(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.var(qml.PauliZ(2)), qml.var(qml.PauliY(1)) - layer2_diag = qml.QNode(layer2_diag, dev) + layer2_diag = QNode(layer2_diag, dev) def layer2_off_diag_first_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) @@ -376,7 +366,7 @@ def layer2_off_diag_first_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliY(1)) - layer2_off_diag_first_order = qml.QNode(layer2_off_diag_first_order, dev) + layer2_off_diag_first_order = QNode(layer2_off_diag_first_order, dev) def layer2_off_diag_second_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) @@ -386,7 +376,7 @@ def layer2_off_diag_second_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.expval(qml.Hermitian(np.kron(Z, Y), wires=[2, 1])) - layer2_off_diag_second_order = qml.QNode(layer2_off_diag_second_order, dev) + layer2_off_diag_second_order = QNode(layer2_off_diag_second_order, dev) # calculate the diagonal terms varK0, varK1 = layer2_diag(x, y, z, h, g, f) @@ -425,7 +415,7 @@ def layer3_diag(x, y, z, h, g, f): qml.RY(f, wires=2) return qml.var(qml.PauliX(1)) - layer3_diag = qml.QNode(layer3_diag, dev) + layer3_diag = QNode(layer3_diag, dev) G3 = layer3_diag(x, y, z, h, g, f) / 4 assert np.allclose(G[3:4, 3:4], G3, atol=tol, rtol=0) @@ -443,7 +433,7 @@ def test_evaluate_diag_approx_metric_tensor(self, sample_circuit, tol): params = [-0.282203, 0.145554, 0.331624, -0.163907, 0.57662, 0.081272] x, y, z, h, g, f = params - G = circuit.metric_tensor(*params, diag_approx=True) + G = qml.metric_tensor(circuit, diag_approx=True)(*params) # ============================================ # Test block diag metric tensor of first layer is correct. 
@@ -490,7 +480,7 @@ def layer2_diag(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.var(qml.PauliZ(2)), qml.var(qml.PauliY(1)) - layer2_diag = qml.QNode(layer2_diag, dev) + layer2_diag = QNode(layer2_diag, dev) # calculate the diagonal terms varK0, varK1 = layer2_diag(x, y, z, h, g, f) @@ -522,7 +512,7 @@ def layer3_diag(x, y, z, h, g, f): qml.RY(f, wires=2) return qml.var(qml.PauliX(1)) - layer3_diag = qml.QNode(layer3_diag, dev) + layer3_diag = QNode(layer3_diag, dev) G3 = layer3_diag(x, y, z, h, g, f) / 4 assert np.allclose(G[3:4, 3:4], G3, atol=tol, rtol=0) @@ -542,7 +532,7 @@ def test_autograd(self, diff_method, tol): """Test metric tensor differentiability in the autograd interface""" dev = qml.device("default.qubit", wires=2) - @qml.qnode(dev, interface="autograd", diff_method=diff_method) + @qnode(dev, interface="autograd", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -553,7 +543,7 @@ def circuit(weights): def cost(weights): return qml.metric_tensor(circuit)(weights)[2, 2] - weights = np.array([0.432, 0.12, -0.432]) + weights = np.array([0.432, 0.12, -0.432], requires_grad=True) a, b, c = weights grad = qml.grad(cost)(weights) @@ -562,6 +552,7 @@ def cost(weights): ) assert np.allclose(grad, expected, atol=tol, rtol=0) + @pytest.mark.xfail def test_jax(self, diff_method, tol): """Test metric tensor differentiability in the JAX interface""" if diff_method == "parameter-shift": @@ -572,7 +563,7 @@ def test_jax(self, diff_method, tol): dev = qml.device("default.qubit.jax", wires=2) - @qml.qnode(dev, interface="jax", diff_method="backprop") + @qnode(dev, interface="jax", diff_method="backprop") def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -600,7 +591,7 @@ def test_tf(self, diff_method, tol): dev = qml.device("default.qubit", wires=2) - @qml.qnode(dev, interface="tf", diff_method=diff_method) + @qnode(dev, interface="tf", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -627,7 +618,7 @@ def test_torch(self, diff_method, tol): dev = qml.device("default.qubit", wires=2) - @qml.qnode(dev, interface="torch", diff_method=diff_method) + @qnode(dev, interface="torch", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) From e0b45f50119b288fe0aa2cd7e28c7867435492e5 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 18:02:37 +0800 Subject: [PATCH 09/52] fixes --- pennylane/gradients/gradient_transform.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index a491a0268e6..1846be0f5da 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -151,11 +151,15 @@ def default_qnode_wrapper(self, qnode, targs, tkwargs): def jacobian_wrapper(*args, **kwargs): qjac = _wrapper(*args, **kwargs) - cjac = cjac_fn(*args, **kwargs) if any(m.return_type is qml.operation.Probability for m in qnode.qtape.measurements): qjac = qml.math.squeeze(qjac) + if not hybrid: + return qjac + + cjac = cjac_fn(*args, **kwargs) + if isinstance(cjac, tuple): # Classical processing of multiple arguments is present. Return qjac @ cjac. 
jacs = [ @@ -167,7 +171,7 @@ def jacobian_wrapper(*args, **kwargs): is_square = cjac.shape == (1,) or (cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]) - if not hybrid or (is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0]))): + if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])): # Classical Jacobian is the identity. No classical processing # is present inside the QNode. return qjac From 523b765903c54f433b150a842c72ed2fa78c3c65 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 22:42:21 +0800 Subject: [PATCH 10/52] add test --- pennylane/transforms/metric_tensor.py | 11 ++------- tests/transforms/test_metric_tensor.py | 31 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/pennylane/transforms/metric_tensor.py b/pennylane/transforms/metric_tensor.py index adb3fc011cb..cc87c48260c 100644 --- a/pennylane/transforms/metric_tensor.py +++ b/pennylane/transforms/metric_tensor.py @@ -39,7 +39,7 @@ def expand_fn(tape): @functools.partial(batch_transform, expand_fn=expand_fn) -def metric_tensor(tape, diag_approx=False, argnum=None): +def metric_tensor(tape, diag_approx=False): """Returns a function that computes the block-diagonal approximation of the metric tensor of a given QNode. @@ -52,9 +52,6 @@ def metric_tensor(tape, diag_approx=False, argnum=None): Args: qnode (.QNode or .QuantumTape): quantum tape or QNode to find the metric tensor of diag_approx (bool): iff True, use the diagonal approximation - argnum (int or list[int] or None): Trainable parameter indices to differentiate - with respect to. If not provided, the derivative with respect to all - trainable indices are returned. hybrid (bool): Specifies whether classical processing inside a QNode should be taken into account when transforming a QNode. @@ -147,10 +144,6 @@ def circuit(weights): [0. , 0.00415023, 0. ], [0. , 0. 
, 0.24878844]]) """ - - if argnum is not None: - tape.trainable_params = set(argnum) - # get the circuit graph graph = tape.graph @@ -255,7 +248,7 @@ def wrapper(*args, **kwargs): _mt = qml.math.tensordot(c, _mt, [[0], [0]]) metric_tensors.append(_mt) - return metric_tensors + return tuple(metric_tensors) is_square = cjac.shape == (1,) or (cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]) diff --git a/tests/transforms/test_metric_tensor.py b/tests/transforms/test_metric_tensor.py index 720c9d02875..3c4d60f1f96 100644 --- a/tests/transforms/test_metric_tensor.py +++ b/tests/transforms/test_metric_tensor.py @@ -244,6 +244,37 @@ def circuit(a, b, c): ) assert np.allclose(g, np.diag(expected), atol=tol, rtol=0) + def test_evaluate_diag_metric_tensor_classical_processing(self, tol): + """Test that a diagonal metric tensor evaluates correctly + when the QNode includes classical processing.""" + dev = qml.device("default.qubit", wires=2) + + def circuit(a, b): + qml.RX(a[1], wires=0) + qml.RY(a[0], wires=0) + qml.CNOT(wires=[0, 1]) + qml.PhaseShift(b, wires=1) + return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) + + circuit = QNode(circuit, dev) + + a = np.array([0.432, 0.1]) + b = 0.12 + + # evaluate metric tensor + g = qml.metric_tensor(circuit)(a, b) + assert isinstance(g, tuple) + assert len(g) == 2 + assert g[0].shape == (len(a), len(a)) + assert g[1].shape == tuple() + + # check that the metric tensor is correct + expected = np.array([np.cos(a[1]) ** 2, 1]) / 4 + assert np.allclose(g[0], np.diag(expected), atol=tol, rtol=0) + + expected = (3 - 2 * np.cos(a[1]) ** 2 * np.cos(2 * a[0]) - np.cos(2 * a[1])) / 16 + assert np.allclose(g[1], expected, atol=tol, rtol=0) + @pytest.fixture(params=["parameter-shift", "backprop"]) def sample_circuit(self, request): """Sample variational circuit fixture used in the From 754bbe7bb06b663d9c402a06302c21b1905a21d3 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 22:47:10 +0800 Subject: [PATCH 11/52] Integrate batch execution into a QNode --- pennylane/gradients/parameter_shift.py | 18 +- pennylane/new_qnode.py | 428 ++++++++++++ pennylane/transforms/batch_transform.py | 61 +- tests/test_new_qnode.py | 867 ++++++++++++++++++++++++ 4 files changed, 1316 insertions(+), 58 deletions(-) create mode 100644 pennylane/new_qnode.py create mode 100644 tests/test_new_qnode.py diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py index d7655420689..6fe6ad83443 100644 --- a/pennylane/gradients/parameter_shift.py +++ b/pennylane/gradients/parameter_shift.py @@ -331,8 +331,22 @@ def var_param_shift(tape, argnum, shift=np.pi / 2, gradient_recipes=None, f0=Non def processing_fn(results): # We need to expand the dimensions of the variance mask, # and convert it to be the same type as the results. 
- mask = qml.math.convert_like(qml.math.reshape(var_mask, [-1, 1]), results[0]) - f0 = qml.math.expand_dims(results[0], -1) + res = results[0] + ragged = getattr(results[0], "dtype", None) is np.dtype("object") + + mask = [] + for m, r in zip(var_mask, results[0]): + array_func = np.ones if m else np.zeros + shape = qml.math.shape(r) + shape = (1,) if shape == tuple() else shape + mask.append(array_func(shape, dtype=bool)) + + if ragged: + res = qml.math.hstack(res) + mask = qml.math.hstack(mask) + + mask = qml.math.convert_like(qml.math.reshape(mask, [-1, 1]), res) + f0 = qml.math.expand_dims(res, -1) pdA = pdA_fn(results[1:tape_boundary]) pdA2 = 0 diff --git a/pennylane/new_qnode.py b/pennylane/new_qnode.py new file mode 100644 index 00000000000..eb2adef7aeb --- /dev/null +++ b/pennylane/new_qnode.py @@ -0,0 +1,428 @@ +# Copyright 2018-2021 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains the QNode class and qnode decorator. +""" +# pylint: disable=too-many-instance-attributes,too-many-arguments,protected-access +from collections.abc import Sequence +import functools +import inspect +import warnings + +import pennylane as qml +from pennylane import Device +from pennylane.interfaces.batch import set_shots, SUPPORTED_INTERFACES + + +class QNode: + """New QNode""" + + def __init__( + self, + func, + device, + interface="autograd", + diff_method="best", + max_expansion=10, + mode="best", + cache=True, + cachesize=10000, + max_diff=1, + **gradient_kwargs, + ): + if interface not in SUPPORTED_INTERFACES: + raise qml.QuantumFunctionError( + f"Unknown interface {interface}. Interface must be " + f"one of {SUPPORTED_INTERFACES}." + ) + + if not isinstance(device, Device): + raise qml.QuantumFunctionError( + "Invalid device. Device must be a valid PennyLane device." + ) + + if "shots" in inspect.signature(func).parameters: + warnings.warn( + "Detected 'shots' as an argument to the given quantum function. " + "The 'shots' argument name is reserved for overriding the number of shots " + "taken by the device. 
Its use outside of this context should be avoided.",
+ UserWarning,
+ )
+ self._qfunc_uses_shots_arg = True
+ else:
+ self._qfunc_uses_shots_arg = False
+
+ # input arguments
+ self.func = func
+ self.device = device
+ self._interface = interface
+ self.diff_method = diff_method
+ self.max_expansion = max_expansion
+
+ # execution keyword arguments
+ self.execute_kwargs = {
+ "mode": mode,
+ "cache": cache,
+ "cachesize": cachesize,
+ "max_diff": max_diff,
+ }
+
+ # internal data attributes
+ self._tape = None
+ self._qfunc_output = None
+ self._gradient_kwargs = gradient_kwargs
+ self._original_device = device
+ self.gradient_fn = None
+ self.gradient_kwargs = None
+
+ self._update_gradient_fn()
+ functools.update_wrapper(self, func)
+
+ def __repr__(self):
+ """String representation."""
+ detail = "<QNode: wires={}, device='{}', interface='{}', diff_method='{}'>"
+ return detail.format(
+ self.device.num_wires,
+ self.device.short_name,
+ self.interface,
+ self.diff_method,
+ )
+
+ @property
+ def interface(self):
+ """The interface used by the QNode"""
+ return self._interface
+
+ @interface.setter
+ def interface(self, value):
+ if value not in SUPPORTED_INTERFACES:
+ raise qml.QuantumFunctionError(
+ f"Unknown interface {value}. Interface must be " f"one of {SUPPORTED_INTERFACES}."
+ )
+
+ self._interface = value
+ self._update_gradient_fn()
+
+ def _update_gradient_fn(self):
+ if self.diff_method is None:
+ self._interface = None
+ self.gradient_fn = None
+ self.gradient_kwargs = {}
+ return
+
+ self.gradient_fn, self.gradient_kwargs, self.device = self.get_gradient_fn(
+ self._original_device, self.interface, self.diff_method
+ )
+ self.gradient_kwargs.update(self._gradient_kwargs or {})
+
+ def _update_original_device(self):
+ # FIX: If the qnode swapped the device, increase the num_execution value on the original device.
+ # In the long run, we should make sure that the user's device is the one
+ # actually run so she has full control. This could be done by changing the class
+ # of the user's device before and after executing the tape.
+
+ if self.device is not self._original_device:
+ self._original_device._num_executions += 1 # pylint: disable=protected-access
+
+ # Update for state vector simulators that have the _pre_rotated_state attribute
+ if hasattr(self._original_device, "_pre_rotated_state"):
+ self._original_device._pre_rotated_state = self.device._pre_rotated_state
+
+ # Update for state vector simulators that have the _state attribute
+ if hasattr(self._original_device, "_state"):
+ self._original_device._state = self.device._state
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def get_gradient_fn(device, interface, diff_method="best"):
+ """Determine the best differentiation method, interface, and device
+ for a requested device, interface, and diff method.
+
+ Args:
+ device (.Device): PennyLane device
+ interface (str): name of the requested interface
+ diff_method (str or .gradient_transform): The requested method of differentiation.
+ If a string, one of ``"best"``, ``"backprop"``, ``"adjoint"``, ``"device"``,
+ ``"parameter-shift"``, or ``"finite-diff"``. A gradient transform may
+ also be passed here.
+
+ Returns:
+ tuple[str or .gradient_transform, dict, .Device]: Tuple containing the ``gradient_fn``,
+ ``gradient_kwargs``, and the device to use when calling the execute function.
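+
+ **Example**
+
+ As an illustrative sketch (assuming a qubit device such as ``default.qubit``),
+ requesting the parameter-shift rule resolves to the corresponding gradient
+ transform:
+
+ .. code-block:: python
+
+ dev = qml.device("default.qubit", wires=1)
+
+ gradient_fn, gradient_kwargs, device = QNode.get_gradient_fn(
+ dev, interface="autograd", diff_method="parameter-shift"
+ )
+
+ # gradient_fn is now qml.gradients.param_shift, and gradient_kwargs == {}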
+ """ + + if diff_method == "best": + return QNode.get_best_method(device, interface) + + if diff_method == "backprop": + return QNode._validate_backprop_method(device, interface) + + if diff_method == "adjoint": + return QNode._validate_adjoint_method(device) + + if diff_method == "device": + return QNode._validate_device_method(device) + + if diff_method == "parameter-shift": + return QNode._validate_parameter_shift(device) + + if diff_method == "finite-diff": + return qml.gradients.finite_diff, {}, device + + if isinstance(diff_method, str): + raise qml.QuantumFunctionError( + f"Differentiation method {diff_method} not recognized. Allowed " + "options are ('best', 'parameter-shift', 'backprop', 'finite-diff', 'device', 'reversible', 'adjoint')." + ) + + if isinstance(diff_method, qml.gradients.gradient_transform): + return diff_method, {}, device + + raise qml.QuantumFunctionError( + f"Differentiation method {diff_method} must be a gradient transform or a string." + ) + + @staticmethod + def get_best_method(device, interface): + """Returns the 'best' differentiation method + for a particular device and interface combination. + + This method attempts to determine support for differentiation + methods using the following order: + + * ``"device"`` + * ``"backprop"`` + * ``"parameter-shift"`` + * ``"finite-diff"`` + + The first differentiation method that is supported (going from + top to bottom) will be returned. + + Args: + device (.Device): PennyLane device + interface (str): name of the requested interface + + Returns: + tuple[str or .gradient_transform, dict, .Device: Tuple containing the ``gradient_fn``, + ``gradient_kwargs``, and the device to use when calling the execute function. + """ + try: + return QNode._validate_device_method(device) + except qml.QuantumFunctionError: + try: + return QNode._validate_backprop_method(device, interface) + except qml.QuantumFunctionError: + try: + return QNode._validate_parameter_shift(device) + except qml.QuantumFunctionError: + return qml.gradients.finite_diff, {}, device + + @staticmethod + def _validate_backprop_method(device, interface): + # determine if the device supports backpropagation + backprop_interface = device.capabilities().get("passthru_interface", None) + + # determine if the device has any child devices that support backpropagation + backprop_devices = device.capabilities().get("passthru_devices", None) + + if backprop_interface is not None: + # device supports backpropagation natively + + if interface == backprop_interface: + return "backprop", {}, device + + raise qml.QuantumFunctionError( + f"Device {device.short_name} only supports diff_method='backprop' when using the " + f"{backprop_interface} interface." + ) + + if device.shots is None and backprop_devices is not None: + + # device is analytic and has child devices that support backpropagation natively + + if interface in backprop_devices: + # TODO: need a better way of passing existing device init options + # to a new device? + device = qml.device( + backprop_devices[interface], + wires=device.wires, + shots=device.shots, + ) + return "backprop", {}, device + + raise qml.QuantumFunctionError( + f"Device {device.short_name} only supports diff_method='backprop' when using the " + f"{list(backprop_devices.keys())} interfaces." + ) + + raise qml.QuantumFunctionError( + f"The {device.short_name} device does not support native computations with " + "autodifferentiation frameworks." 
+ )
+
+ @staticmethod
+ def _validate_adjoint_method(device):
+ # The conditions below provide a minimal set of requirements that we can likely improve upon in
+ # future, or alternatively summarize within a single device capability. Moreover, we also
+ # need to inspect the circuit measurements to ensure only expectation values are taken. This
+ # cannot be done here since we don't yet know the composition of the circuit.
+
+ supported_device = hasattr(device, "_apply_operation")
+ supported_device = supported_device and hasattr(device, "_apply_unitary")
+ supported_device = supported_device and device.capabilities().get("returns_state")
+ supported_device = supported_device and hasattr(device, "adjoint_jacobian")
+
+ if not supported_device:
+ raise ValueError(
+ f"The {device.short_name} device does not support adjoint differentiation."
+ )
+
+ if device.shots is not None:
+ warnings.warn(
+ "Requested adjoint differentiation to be computed with finite shots."
+ " Adjoint differentiation is always calculated exactly.",
+ UserWarning,
+ )
+
+ return "device", {"use_device_state": True, "method": "adjoint_jacobian"}, device
+
+ @staticmethod
+ def _validate_device_method(device):
+ # determine if the device provides its own jacobian method
+ provides_jacobian = device.capabilities().get("provides_jacobian", False)
+
+ if not provides_jacobian:
+ raise qml.QuantumFunctionError(
+ f"The {device.short_name} device does not provide a native "
+ "method for computing the jacobian."
+ )
+
+ return "device", {}, device
+
+ @staticmethod
+ def _validate_parameter_shift(device):
+ model = device.capabilities().get("model", None)
+
+ if model == "qubit":
+ return qml.gradients.param_shift, {}, device
+
+ if model == "cv":
+ return qml.gradients.param_shift_cv, {"dev": device}, device
+
+ raise qml.QuantumFunctionError(
+ f"Device {device.short_name} uses an unknown model ('{model}') "
+ "that does not support the parameter-shift rule."
+ )
+
+ @property
+ def tape(self):
+ """The quantum tape"""
+ return self._tape
+
+ qtape = tape # for backwards compatibility
+
+ def construct(self, args, kwargs):
+ """Call the quantum function with a tape context, ensuring the operations get queued."""
+
+ if self.interface == "autograd":
+ # HOTFIX: to maintain compatibility with core, here we treat
+ # all inputs that do not explicitly specify `requires_grad=False`
+ # as trainable. This should be removed at some point, forcing users
+ # to specify `requires_grad=True` for trainable parameters.
+ args = [
+ qml.numpy.array(a, requires_grad=True) if not hasattr(a, "requires_grad") else a
+ for a in args
+ ]
+
+ self._tape = qml.tape.JacobianTape()
+
+ with self.tape:
+ self._qfunc_output = self.func(*args, **kwargs)
+
+ params = self.tape.get_parameters(trainable_only=False)
+ self.tape.trainable_params = qml.math.get_trainable_indices(params)
+
+ if not isinstance(self._qfunc_output, Sequence):
+ measurement_processes = (self._qfunc_output,)
+ else:
+ measurement_processes = self._qfunc_output
+
+ if not all(isinstance(m, qml.measure.MeasurementProcess) for m in measurement_processes):
+ raise qml.QuantumFunctionError(
+ "A quantum function must return either a single measurement, "
+ "or a nonempty sequence of measurements."
+ )
+
+ if not all(ret == m for ret, m in zip(measurement_processes, self.tape.measurements)):
+ raise qml.QuantumFunctionError(
+ "All measurements must be returned in the order they are measured."
+ ) + + for obj in self.tape.operations + self.tape.observables: + + if getattr(obj, "num_wires", None) is qml.operation.WiresEnum.AllWires: + # check here only if enough wires + if len(obj.wires) != self.device.num_wires: + raise qml.QuantumFunctionError( + "Operator {} must act on all wires".format(obj.name) + ) + + def __call__(self, *args, **kwargs): + override_shots = False + + if not self._qfunc_uses_shots_arg: + # If shots specified in call but not in qfunc signature, + # interpret it as device shots value for this call. + override_shots = kwargs.pop("shots", False) + + if override_shots is not False: + # Since shots has changed, we need to update the preferred gradient function. + # This is because the gradient function chosen at initialization may + # no longer be applicable. + + # store the initialization gradient function + original_grad_fn = [self.gradient_fn, self.gradient_kwargs, self.device] + + # update the gradient function + set_shots(self._original_device, override_shots)(self._update_gradient_fn)() + + # construct the tape + self.construct(args, kwargs) + + res = qml.execute( + [self.tape], + device=self.device, + gradient_fn=self.gradient_fn, + interface=self.interface, + gradient_kwargs=self.gradient_kwargs, + override_shots=override_shots, + **self.execute_kwargs, + )[0] + + if override_shots is not False: + # restore the initialization gradient function + self.gradient_fn, self.gradient_kwargs, self.device = original_grad_fn + + self._update_original_device() + + if isinstance(self._qfunc_output, Sequence) or ( + self.tape.is_sampled and self.device._has_partitioned_shots() + ): + return res + + return qml.math.squeeze(res) + + +qnode = lambda dev, **kwargs: functools.partial(QNode, device=dev, **kwargs) +qnode = functools.update_wrapper(qnode, QNode) diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py index 5c7999ce092..c41d5252cd8 100644 --- a/pennylane/transforms/batch_transform.py +++ b/pennylane/transforms/batch_transform.py @@ -14,9 +14,9 @@ """Contains tools and decorators for registering batch transforms.""" # pylint: disable=too-few-public-methods import functools -import types import pennylane as qml +from pennylane.new_qnode import QNode class batch_transform: @@ -159,60 +159,9 @@ def __init__(self, transform_fn, expand_fn=None, differentiable=True): self.transform_fn = transform_fn self.expand_fn = expand_fn self.differentiable = differentiable - self.qnode_wrapper = self.default_qnode_wrapper functools.update_wrapper(self, transform_fn) - def custom_qnode_wrapper(self, fn): - """Register a custom QNode execution wrapper function - for the batch transform. - - **Example** - - .. code-block:: python - - def my_transform(tape, *targs, **tkwargs): - ... - return tapes, processing_fn - - @my_transform.custom_qnode_wrapper - def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): - def wrapper_fn(*args, **kwargs): - # construct QNode - qnode.construct(args, kwargs) - # apply transform to QNode's tapes - tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs) - # execute tapes and return processed result - ... - return processing_fn(results) - return wrapper_fn - - The custom QNode execution wrapper must have arguments - ``self`` (the batch transform object), ``qnode`` (the input QNode - to transform and execute), ``targs`` and ``tkwargs`` (the transform - arguments and keyword arguments respectively). 
- - It should return a callable object that accepts the *same* arguments - as the QNode, and returns the transformed numerical result. - - The default :meth:`~.default_qnode_wrapper` method may be called - if only pre- or post-processing dependent on QNode arguments is required: - - .. code-block:: python - - @my_transform.custom_qnode_wrapper - def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): - transformed_qnode = self.default_qnode_wrapper(qnode) - - def wrapper_fn(*args, **kwargs): - args, kwargs = pre_process(args, kwargs) - res = transformed_qnode(*args, **kwargs) - ... - return ... - return wrapper_fn - """ - self.qnode_wrapper = types.MethodType(fn, self) - - def default_qnode_wrapper(self, qnode, targs, tkwargs): + def qnode_execution_wrapper(self, qnode, targs, tkwargs): """A wrapper method that takes a QNode and transform arguments, and returns a function that 'wraps' the QNode execution. @@ -257,10 +206,10 @@ def __call__(self, qnode, *targs, **tkwargs): # tapes, fn = some_transform(tape, *transform_args) return self.construct(qnode, *targs, **tkwargs) - if isinstance(qnode, (qml.QNode, qml.ExpvalCost)): + if isinstance(qnode, (qml.QNode, QNode)): # Input is a QNode: # result = some_transform(qnode, *transform_args)(*qnode_args) - wrapper = self.qnode_wrapper(qnode, targs, tkwargs) + wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) wrapper = functools.wraps(qnode)(wrapper) else: @@ -282,7 +231,7 @@ def __call__(self, qnode, *targs, **tkwargs): targs = (qnode,) + targs def wrapper(qnode): - _wrapper = self.qnode_wrapper(qnode, targs, tkwargs) + _wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) _wrapper = functools.wraps(qnode)(_wrapper) return _wrapper diff --git a/tests/test_new_qnode.py b/tests/test_new_qnode.py new file mode 100644 index 00000000000..59eff9a3773 --- /dev/null +++ b/tests/test_new_qnode.py @@ -0,0 +1,867 @@ +# Copyright 2018-2021 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit tests for the QNode""" +from collections import defaultdict +import pytest +import numpy as np + +import pennylane as qml +from pennylane import numpy as pnp +from pennylane.new_qnode import qnode, QNode +from pennylane.transforms import draw +from pennylane.tape import JacobianTape + + +def dummyfunc(): + return None + + +class TestValidation: + """Tests for QNode creation and validation""" + + def test_invalid_interface(self): + """Test that an exception is raised for an invalid interface""" + dev = qml.device("default.qubit", wires=1) + test_interface = "something" + expected_error = ( + fr"Unknown interface {test_interface}\. Interface must be " + r"one of \[None, 'autograd', 'numpy', 'torch', 'pytorch', 'tf', 'tensorflow'\]\." 
+ )
+
+ with pytest.raises(qml.QuantumFunctionError, match=expected_error):
+ QNode(dummyfunc, dev, interface="something")
+
+ def test_changing_invalid_interface(self):
+ """Test that an exception is raised for an invalid interface
+ on a pre-existing QNode"""
+ dev = qml.device("default.qubit", wires=1)
+ test_interface = "something"
+
+ @qnode(dev)
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.probs(wires=0)
+
+ expected_error = (
+ fr"Unknown interface {test_interface}\. Interface must be "
+ r"one of \[None, 'autograd', 'numpy', 'torch', 'pytorch', 'tf', 'tensorflow'\]\."
+ )
+
+ with pytest.raises(qml.QuantumFunctionError, match=expected_error):
+ circuit.interface = test_interface
+
+ def test_invalid_device(self):
+ """Test that an exception is raised for an invalid device"""
+ with pytest.raises(qml.QuantumFunctionError, match="Invalid device"):
+ QNode(dummyfunc, None)
+
+ def test_validate_device_method(self, monkeypatch):
+ """Test that the method for validating the device diff method
+ works as expected"""
+ dev = qml.device("default.qubit", wires=1)
+
+ with pytest.raises(
+ qml.QuantumFunctionError,
+ match="does not provide a native method for computing the jacobian",
+ ):
+ QNode._validate_device_method(dev)
+
+ monkeypatch.setitem(dev._capabilities, "provides_jacobian", True)
+ method, diff_options, device = QNode._validate_device_method(dev)
+
+ assert method == "device"
+ assert device is dev
+
+ def test_validate_backprop_method_invalid_device(self):
+ """Test that the method for validating the backprop diff method
+ raises an exception if the device does not support backprop."""
+ dev = qml.device("default.gaussian", wires=1)
+
+ with pytest.raises(qml.QuantumFunctionError, match="does not support native computations"):
+ QNode._validate_backprop_method(dev, None)
+
+ def test_validate_backprop_method_invalid_interface(self, monkeypatch):
+ """Test that the method for validating the backprop diff method
+ raises an exception if the wrong interface is provided"""
+ dev = qml.device("default.qubit", wires=1)
+ test_interface = "something"
+
+ monkeypatch.setitem(dev._capabilities, "passthru_interface", test_interface)
+
+ with pytest.raises(qml.QuantumFunctionError, match=f"when using the {test_interface}"):
+ QNode._validate_backprop_method(dev, None)
+
+ def test_validate_backprop_method(self, monkeypatch):
+ """Test that the method for validating the backprop diff method
+ works as expected"""
+ dev = qml.device("default.qubit", wires=1)
+ test_interface = "something"
+ monkeypatch.setitem(dev._capabilities, "passthru_interface", test_interface)
+
+ method, diff_options, device = QNode._validate_backprop_method(dev, "something")
+
+ assert method == "backprop"
+ assert device is dev
+
+ def test_validate_backprop_child_method(self, monkeypatch):
+ """Test that the method for validating the backprop diff method
+ works as expected if a child device supports backprop"""
+ dev = qml.device("default.qubit", wires=1)
+ test_interface = "something"
+
+ orig_capabilities = dev.capabilities().copy()
+ orig_capabilities["passthru_devices"] = {test_interface: "default.gaussian"}
+ monkeypatch.setattr(dev, "capabilities", lambda: orig_capabilities)
+
+ method, diff_options, device = QNode._validate_backprop_method(dev, test_interface)
+
+ assert method == "backprop"
+ assert isinstance(device, qml.devices.DefaultGaussian)
+
+ def test_validate_backprop_child_method_wrong_interface(self, monkeypatch):
+ """Test that the method for validating the backprop
diff method
+ raises an error if a child device supports backprop but uses a different interface"""
+ dev = qml.device("default.qubit", wires=1)
+ test_interface = "something"
+
+ orig_capabilities = dev.capabilities().copy()
+ orig_capabilities["passthru_devices"] = {test_interface: "default.gaussian"}
+ monkeypatch.setattr(dev, "capabilities", lambda: orig_capabilities)
+
+ with pytest.raises(
+ qml.QuantumFunctionError, match=r"when using the \['something'\] interface"
+ ):
+ QNode._validate_backprop_method(dev, "another_interface")
+
+ def test_parameter_shift_qubit_device(self):
+ """Test that the _validate_parameter_shift method
+ returns the correct gradient transform for qubit devices."""
+ dev = qml.device("default.qubit", wires=1)
+ gradient_fn = QNode._validate_parameter_shift(dev)
+ assert gradient_fn[0] is qml.gradients.param_shift
+
+ def test_parameter_shift_cv_device(self):
+ """Test that the _validate_parameter_shift method
+ returns the correct gradient transform for cv devices."""
+ dev = qml.device("default.gaussian", wires=1)
+ gradient_fn = QNode._validate_parameter_shift(dev)
+ assert gradient_fn[0] is qml.gradients.param_shift_cv
+ assert gradient_fn[1] == {"dev": dev}
+
+ def test_parameter_shift_tape_unknown_model(self, monkeypatch):
+ """Test that an unknown model raises an exception"""
+
+ def capabilities(cls):
+ capabilities = cls._capabilities
+ capabilities.update(model="None")
+ return capabilities
+
+ monkeypatch.setattr(qml.devices.DefaultQubit, "capabilities", capabilities)
+ dev = qml.device("default.qubit", wires=1)
+
+ with pytest.raises(
+ qml.QuantumFunctionError, match="does not support the parameter-shift rule"
+ ):
+ QNode._validate_parameter_shift(dev)
+
+ def test_best_method(self, monkeypatch):
+ """Test that the method for determining the best diff method
+ for a given device and interface works correctly"""
+ dev = qml.device("default.qubit", wires=1)
+ monkeypatch.setitem(dev._capabilities, "passthru_interface", "some_interface")
+ monkeypatch.setitem(dev._capabilities, "provides_jacobian", True)
+
+ # device is top priority
+ res = QNode.get_best_method(dev, "another_interface")
+ assert res == ("device", {}, dev)
+
+ # backprop is next priority
+ monkeypatch.setitem(dev._capabilities, "provides_jacobian", False)
+ res = QNode.get_best_method(dev, "some_interface")
+ assert res == ("backprop", {}, dev)
+
+ # The next fallback is parameter-shift.
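+ # (backprop is unavailable here: the requested interface does not match
+ # the monkeypatched passthru_interface, "some_interface")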
+ res = QNode.get_best_method(dev, "another_interface") + assert res == (qml.gradients.param_shift, {}, dev) + + # finally, if both fail, finite differences is the fallback + def capabilities(cls): + capabilities = cls._capabilities + capabilities.update(model="None") + return capabilities + + monkeypatch.setattr(qml.devices.DefaultQubit, "capabilities", capabilities) + res = QNode.get_best_method(dev, "another_interface") + assert res == (qml.gradients.finite_diff, {}, dev) + + def test_diff_method(self, mocker): + """Test that a user-supplied diff-method correctly returns the right + diff method.""" + dev = qml.device("default.qubit", wires=1) + + mock_best = mocker.patch("pennylane.new_qnode.QNode.get_best_method") + mock_best.return_value = ("best", {}, dev) + + mock_backprop = mocker.patch("pennylane.new_qnode.QNode._validate_backprop_method") + mock_backprop.return_value = ("backprop", {}, dev) + + mock_device = mocker.patch("pennylane.new_qnode.QNode._validate_device_method") + mock_device.return_value = ("device", {}, dev) + + qn = QNode(dummyfunc, dev, diff_method="best") + assert qn.diff_method == "best" + assert qn.gradient_fn == "best" + + qn = QNode(dummyfunc, dev, diff_method="backprop") + assert qn.diff_method == "backprop" + assert qn.gradient_fn == "backprop" + mock_backprop.assert_called_once() + + qn = QNode(dummyfunc, dev, diff_method="device") + assert qn.diff_method == "device" + assert qn.gradient_fn == "device" + mock_device.assert_called_once() + + qn = QNode(dummyfunc, dev, diff_method="finite-diff") + assert qn.diff_method == "finite-diff" + assert qn.gradient_fn is qml.gradients.finite_diff + + qn = QNode(dummyfunc, dev, diff_method="parameter-shift") + assert qn.diff_method == "parameter-shift" + assert qn.gradient_fn is qml.gradients.param_shift + + # check that get_best_method was only ever called once + mock_best.assert_called_once() + + def test_unknown_diff_method_string(self): + """Test that an exception is raised for an unknown differentiation method string""" + dev = qml.device("default.qubit", wires=1) + + with pytest.raises( + qml.QuantumFunctionError, match="Differentiation method hello not recognized" + ): + QNode(dummyfunc, dev, diff_method="hello") + + def test_unknown_diff_method_type(self): + """Test that an exception is raised for an unknown differentiation method type""" + dev = qml.device("default.qubit", wires=1) + + with pytest.raises( + qml.QuantumFunctionError, + match="Differentiation method 5 must be a gradient transform or a string", + ): + QNode(dummyfunc, dev, diff_method=5) + + def test_validate_adjoint_invalid_device(self): + """Test if a ValueError is raised when an invalid device is provided to + _validate_adjoint_method""" + + dev = qml.device("default.gaussian", wires=1) + + with pytest.raises(ValueError, match="The default.gaussian device does not"): + QNode._validate_adjoint_method(dev) + + def test_validate_adjoint_finite_shots(self): + """Test that a UserWarning is raised when device has finite shots""" + + dev = qml.device("default.qubit", wires=1, shots=1) + + with pytest.warns( + UserWarning, match="Requested adjoint differentiation to be computed with finite shots." 
+ ):
+ QNode._validate_adjoint_method(dev)
+
+ def test_adjoint_finite_shots(self):
+ """Tests that UserWarning is raised with the adjoint differentiation method
+ on QNode construction when the device has finite shots
+ """
+
+ dev = qml.device("default.qubit", wires=1, shots=1)
+
+ with pytest.warns(
+ UserWarning, match="Requested adjoint differentiation to be computed with finite shots."
+ ):
+
+ @qnode(dev, diff_method="adjoint")
+ def circ():
+ return qml.expval(qml.PauliZ(0))
+
+ def test_qnode_print(self):
+ """Test that printing a QNode object yields the right information."""
+ dev = qml.device("default.qubit", wires=1)
+
+ def func(x):
+ qml.RX(x, wires=0)
+ return qml.expval(qml.PauliZ(0))
+
+ qn = QNode(func, dev)
+
+ assert (
+ qn.__repr__()
+ == "<QNode: wires=1, device='default.qubit', interface='autograd', diff_method='best'>"
+ )
+
+ def test_diff_method_none(self, tol):
+ """Test that diff_method=None creates a QNode with no interface, and no
+ device swapping."""
+ dev = qml.device("default.qubit", wires=1)
+
+ @qnode(dev, diff_method=None)
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.expval(qml.PauliZ(0))
+
+ assert circuit.interface is None
+ assert circuit.gradient_fn is None
+ assert circuit.device is dev
+
+ # QNode can still be executed
+ assert np.allclose(circuit(0.5), np.cos(0.5), atol=tol, rtol=0)
+
+ with pytest.warns(UserWarning, match="Output seems independent of input"):
+ grad = qml.grad(circuit)(0.5)
+
+ assert np.allclose(grad, 0)
+
+
+class TestTapeConstruction:
+ """Tests for the tape construction"""
+
+ def test_basic_tape_construction(self, tol):
+ """Test that a quantum tape is properly constructed"""
+ dev = qml.device("default.qubit", wires=2)
+
+ def func(x, y):
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0))
+
+ qn = QNode(func, dev)
+
+ x = 0.12
+ y = 0.54
+
+ res = qn(x, y)
+
+ assert isinstance(qn.qtape, JacobianTape)
+ assert len(qn.qtape.operations) == 3
+ assert len(qn.qtape.observables) == 1
+ assert qn.qtape.num_params == 2
+
+ expected = qn.qtape.execute(dev)
+ assert np.allclose(res, expected, atol=tol, rtol=0)
+
+ # when called, a new quantum tape is constructed
+ old_tape = qn.qtape
+ res2 = qn(x, y)
+
+ assert np.allclose(res, res2, atol=tol, rtol=0)
+ assert qn.qtape is not old_tape
+
+ def test_jacobian(self, tol):
+ """Test the jacobian computation"""
+ dev = qml.device("default.qubit", wires=2)
+
+ def func(x, y):
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ return qml.probs(wires=0), qml.probs(wires=1)
+
+ qn = QNode(func, dev, diff_method="finite-diff", h=1e-8, approx_order=2)
+ assert qn.gradient_kwargs["h"] == 1e-8
+ assert qn.gradient_kwargs["approx_order"] == 2
+
+ jac = qn.gradient_fn(qn)(0.45, 0.1)
+ assert jac.shape == (2, 2, 2)
+
+ def test_returning_non_measurements(self):
+ """Test that an exception is raised if a non-measurement
+ is returned from the QNode."""
+ dev = qml.device("default.qubit", wires=2)
+
+ def func(x, y):
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ return 5
+
+ qn = QNode(func, dev)
+
+ with pytest.raises(
+ qml.QuantumFunctionError, match="must return either a single measurement"
+ ):
+ qn(5, 1)
+
+ def func(x, y):
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0)), 5
+
+ qn = QNode(func, dev)
+
+ with pytest.raises(
+ qml.QuantumFunctionError, match="must return either a single measurement"
+ ):
+ qn(5, 1)
+
+ def test_inconsistent_measurement_order(self):
+ """Test that an exception is raised if measurements are
returned in an
+ order different to how they were queued on the tape"""
+ dev = qml.device("default.qubit", wires=2)
+
+ def func(x, y):
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ m = qml.expval(qml.PauliZ(0))
+ return qml.expval(qml.PauliX(1)), m
+
+ qn = QNode(func, dev)
+
+ with pytest.raises(
+ qml.QuantumFunctionError,
+ match="measurements must be returned in the order they are measured",
+ ):
+ qn(5, 1)
+
+ def test_consistent_measurement_order(self):
+ """Test that evaluation succeeds as expected if measurements are returned in the
+ same order in which they were queued on the tape"""
+ dev = qml.device("default.qubit", wires=2)
+
+ def func(x, y):
+ global op1, op2, op3, m1, m2
+ op1 = qml.RX(x, wires=0)
+ op2 = qml.RY(y, wires=1)
+ op3 = qml.CNOT(wires=[0, 1])
+ m1 = qml.expval(qml.PauliZ(0))
+ m2 = qml.expval(qml.PauliX(1))
+ return [m1, m2]
+
+ qn = QNode(func, dev)
+ qn(5, 1)  # evaluate the QNode
+ assert qn.qtape.operations == [op1, op2, op3]
+ assert qn.qtape.measurements == [m1, m2]
+
+ @pytest.mark.xfail
+ def test_multiple_observables_same_wire_expval(self, mocker):
+ """Test that the QNode supports returning expectation values of observables that are on the
+ same wire (provided that they are Pauli words and qubit-wise commuting)"""
+ dev = qml.device("default.qubit", wires=3)
+
+ w = np.random.random((2, 3, 3))
+
+ @qnode(dev)
+ def f(w):
+ qml.templates.StronglyEntanglingLayers(w, wires=range(3))
+ return (
+ qml.expval(qml.PauliX(0)),
+ qml.expval(qml.PauliX(0) @ qml.PauliZ(1)),
+ qml.expval(qml.PauliX(2)),
+ )
+
+ spy = mocker.spy(qml.devices.DefaultQubit, "apply")
+ res = f(w)
+ spy.assert_called_once()
+
+ obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliX(2)]
+ qnodes = qml.map(qml.templates.StronglyEntanglingLayers, obs, dev)
+ res_2 = qnodes(w)
+
+ assert np.allclose(res, res_2)
+
+ @pytest.mark.xfail
+ def test_multiple_observables_same_wire_mixed(self, mocker):
+ """Test that the QNode supports returning observables that are on the
+ same wire but with different return types (provided that the observables are Pauli words and
+ qubit-wise commuting)"""
+ dev = qml.device("default.qubit", wires=3)
+
+ w = np.random.random((2, 3, 3))
+
+ @qnode(dev)
+ def f(w):
+ qml.templates.StronglyEntanglingLayers(w, wires=range(3))
+ return qml.expval(qml.PauliX(0)), qml.var(qml.PauliX(0) @ qml.PauliZ(1))
+
+ spy = mocker.spy(qml.devices.DefaultQubit, "apply")
+ res = f(w)
+ spy.assert_called_once()
+
+ q1 = qml.map(qml.templates.StronglyEntanglingLayers, [qml.PauliX(0)], dev, measure="expval")
+ q2 = qml.map(
+ qml.templates.StronglyEntanglingLayers,
+ [qml.PauliX(0) @ qml.PauliZ(1)],
+ dev,
+ measure="var",
+ )
+
+ res_2 = np.array([q1(w), q2(w)]).squeeze()
+
+ assert np.allclose(res, res_2)
+
+ def test_operator_all_wires(self, monkeypatch, tol):
+ """Test that an operator that must act on all wires
+ does, or raises an error."""
+ monkeypatch.setattr(qml.RX, "num_wires", qml.operation.AllWires)
+
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.expval(qml.PauliZ(0))
+
+ dev = qml.device("default.qubit", wires=2)
+ qnode = QNode(circuit, dev)
+
+ with pytest.raises(qml.QuantumFunctionError, match="Operator RX must act on all wires"):
+ qnode(0.5)
+
+ dev = qml.device("default.qubit", wires=1)
+ qnode = QNode(circuit, dev)
+ assert np.allclose(qnode(0.5), np.cos(0.5), atol=tol, rtol=0)
+
+
+class TestDecorator:
+ """Unittests for the decorator"""
+
+ def test_decorator(self, tol):
+ """Test that the decorator correctly creates a QNode."""
+
dev = qml.device("default.qubit", wires=2)
+
+ @qnode(dev)
+ def func(x, y):
+ """My function docstring"""
+ qml.RX(x, wires=0)
+ qml.RY(y, wires=1)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0))
+
+ assert isinstance(func, QNode)
+ assert func.__doc__ == "My function docstring"
+
+ x = 0.12
+ y = 0.54
+
+ res = func(x, y)
+
+ assert isinstance(func.qtape, JacobianTape)
+ assert len(func.qtape.operations) == 3
+ assert len(func.qtape.observables) == 1
+ assert func.qtape.num_params == 2
+
+ expected = func.qtape.execute(dev)
+ assert np.allclose(res, expected, atol=tol, rtol=0)
+
+ # when called, a new quantum tape is constructed
+ old_tape = func.qtape
+ res2 = func(x, y)
+
+ assert np.allclose(res, res2, atol=tol, rtol=0)
+ assert func.qtape is not old_tape
+
+
+class TestIntegration:
+ """Integration tests."""
+
+ def test_correct_number_of_executions_autograd(self):
+ """Test that the number of executions is tracked in the autograd interface."""
+
+ def func():
+ qml.Hadamard(wires=0)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0))
+
+ dev = qml.device("default.qubit", wires=2)
+ qn = QNode(func, dev, interface="autograd")
+
+ for i in range(2):
+ qn()
+
+ assert dev.num_executions == 2
+
+ qn2 = QNode(func, dev, interface="autograd")
+ for i in range(3):
+ qn2()
+
+ assert dev.num_executions == 5
+
+ def test_correct_number_of_executions_tf(self):
+ """Test that the number of executions is tracked in the tf interface."""
+ tf = pytest.importorskip("tensorflow")
+
+ def func():
+ qml.Hadamard(wires=0)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0))
+
+ dev = qml.device("default.qubit", wires=2)
+ qn = QNode(func, dev, interface="tf")
+ for i in range(2):
+ qn()
+
+ assert dev.num_executions == 2
+
+ qn2 = QNode(func, dev, interface="tf")
+ for i in range(3):
+ qn2()
+
+ assert dev.num_executions == 5
+
+ # QNode with a different interface
+ qn3 = QNode(func, dev, interface="autograd")
+ qn3()
+
+ assert dev.num_executions == 6
+
+ def test_correct_number_of_executions_torch(self):
+ """Test that the number of executions is tracked in the torch interface."""
+ torch = pytest.importorskip("torch")
+
+ def func():
+ qml.Hadamard(wires=0)
+ qml.CNOT(wires=[0, 1])
+ return qml.expval(qml.PauliZ(0))
+
+ dev = qml.device("default.qubit", wires=2)
+ qn = QNode(func, dev, interface="torch")
+ for i in range(2):
+ qn()
+
+ assert dev.num_executions == 2
+
+ qn2 = QNode(func, dev, interface="torch")
+ for i in range(3):
+ qn2()
+
+ assert dev.num_executions == 5
+
+ # QNode with a different interface
+ qn3 = QNode(func, dev, interface="autograd")
+ qn3()
+
+ assert dev.num_executions == 6
+
+ @pytest.mark.parametrize("diff_method", ["parameter-shift", "finite-diff"])
+ def test_single_expectation_value_with_argnum_one(self, diff_method, tol):
+ """Tests correct output shape and evaluation for a QNode
+ with a single expval output where only one parameter is chosen to
+ estimate the jacobian.
+
+ This test relies on the fact that exactly one term of the estimated
+ jacobian will match the expected analytical value.
+ """ + from pennylane import numpy as anp + + dev = qml.device("default.qubit", wires=2) + + x = anp.array(0.543, requires_grad=True) + y = anp.array(-0.654, requires_grad=True) + + @qnode( + dev, diff_method=diff_method, argnum=[1] + ) # <--- we only choose one trainable parameter + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + res = qml.grad(circuit)(x, y) + assert len(res) == 2 + + expected = (0, np.cos(y) * np.cos(x)) + res = res + expected = expected + + assert np.allclose(res, expected, atol=tol, rtol=0) + + +class TestShots: + """Unittests for specifying shots per call.""" + + def test_specify_shots_per_call_sample(self): + """Tests that shots can be set per call for a sample return type.""" + dev = qml.device("default.qubit", wires=1, shots=10) + + @qnode(dev) + def circuit(a): + qml.RX(a, wires=0) + return qml.sample(qml.PauliZ(wires=0)) + + assert len(circuit(0.8)) == 10 + assert len(circuit(0.8, shots=2)) == 2 + assert len(circuit(0.8, shots=3178)) == 3178 + assert len(circuit(0.8)) == 10 + + def test_specify_shots_per_call_expval(self): + """Tests that shots can be set per call for an expectation value. + Note: this test has a vanishingly small probability to fail.""" + dev = qml.device("default.qubit", wires=1, shots=None) + + @qnode(dev) + def circuit(): + qml.Hadamard(wires=0) + return qml.expval(qml.PauliZ(wires=0)) + + # check that the circuit is analytic + res1 = [circuit() for _ in range(100)] + assert np.std(res1) == 0.0 + assert circuit.device._shots is None + + # check that the circuit is temporary non-analytic + res1 = [circuit(shots=1) for _ in range(100)] + assert np.std(res1) != 0.0 + + # check that the circuit is analytic again + res1 = [circuit() for _ in range(100)] + assert np.std(res1) == 0.0 + assert circuit.device._shots is None + + def test_no_shots_per_call_if_user_has_shots_qfunc_kwarg(self): + """Tests that the per-call shots overwriting is suspended if user + has a shots keyword argument, but a warning is raised.""" + + dev = qml.device("default.qubit", wires=2, shots=10) + + def circuit(a, shots=0): + qml.RX(a, wires=shots) + return qml.sample(qml.PauliZ(wires=0)) + + with pytest.warns( + UserWarning, match="The 'shots' argument name is reserved for overriding" + ): + circuit = QNode(circuit, dev) + + assert len(circuit(0.8)) == 10 + assert circuit.qtape.operations[0].wires.labels == (0,) + + assert len(circuit(0.8, shots=1)) == 10 + assert circuit.qtape.operations[0].wires.labels == (1,) + + assert len(circuit(0.8, shots=0)) == 10 + assert circuit.qtape.operations[0].wires.labels == (0,) + + def test_no_shots_per_call_if_user_has_shots_qfunc_arg(self): + """Tests that the per-call shots overwriting is suspended + if user has a shots argument, but a warning is raised.""" + + # Todo: use standard creation of qnode below for both asserts once we do not parse args to tensors any more + dev = qml.device("default.qubit", wires=[qml.numpy.array(0), qml.numpy.array(1)], shots=10) + + def circuit(a, shots): + qml.RX(a, wires=shots) + return qml.sample(qml.PauliZ(wires=qml.numpy.array(0))) + + # assert that warning is still raised + with pytest.warns( + UserWarning, match="The 'shots' argument name is reserved for overriding" + ): + circuit = QNode(circuit, dev) + + assert len(circuit(0.8, 1)) == 10 + assert circuit.qtape.operations[0].wires.labels == (1,) + + dev = qml.device("default.qubit", wires=2, shots=10) + + with pytest.warns( + UserWarning, match="The 
'shots' argument name is reserved for overriding" + ): + + @qnode(dev) + def circuit(a, shots): + qml.RX(a, wires=shots) + return qml.sample(qml.PauliZ(wires=0)) + + assert len(circuit(0.8, shots=0)) == 10 + assert circuit.qtape.operations[0].wires.labels == (0,) + + @pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"]) + def test_shots_setting_does_not_mutate_device(self, diff_method): + """Tests that per-call shots setting does not change the number of shots in the device.""" + + dev = qml.device("default.qubit", wires=1, shots=3) + + @qnode(dev) + def circuit(a): + qml.RX(a, wires=0) + return qml.sample(qml.PauliZ(wires=0)) + + assert dev.shots == 3 + res = circuit(0.8, shots=2) + assert len(res) == 2 + assert dev.shots == 3 + + +@pytest.mark.xfail +class TestSpecs: + """Tests for the qnode property specs""" + + def test_specs_error(self): + """Tests an error is raised if the tape is not constructed.""" + + dev = qml.device("default.qubit", wires=4) + + @qnode(dev) + def circuit(): + return qml.expval(qml.PauliZ(0)) + + with pytest.raises(qml.QuantumFunctionError, match=r"The QNode specifications"): + circuit.specs + + @pytest.mark.parametrize( + "diff_method, len_info", [("backprop", 10), ("parameter-shift", 12), ("adjoint", 11)] + ) + def test_specs(self, diff_method, len_info): + """Tests the specs property with backprop""" + + dev = qml.device("default.qubit", wires=4) + + @qnode(dev, diff_method=diff_method) + def circuit(x, y): + qml.RX(x[0], wires=0) + qml.Toffoli(wires=(0, 1, 2)) + qml.CRY(x[1], wires=(0, 1)) + qml.Rot(x[2], x[3], y, wires=2) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1)) + + x = pnp.array([0.05, 0.1, 0.2, 0.3], requires_grad=True) + y = pnp.array(0.1, requires_grad=False) + + res = circuit(x, y) + + info = circuit.specs + + assert len(info) == len_info + + assert info["gate_sizes"] == defaultdict(int, {1: 2, 3: 1, 2: 1}) + assert info["gate_types"] == defaultdict(int, {"RX": 1, "Toffoli": 1, "CRY": 1, "Rot": 1}) + assert info["num_operations"] == 4 + assert info["num_observables"] == 2 + assert info["num_diagonalizing_gates"] == 1 + assert info["num_used_wires"] == 3 + assert info["depth"] == 3 + assert info["num_device_wires"] == 4 + + assert info["diff_method"] == diff_method + + if diff_method == "parameter-shift": + assert info["num_parameter_shift_executions"] == 7 + + if diff_method != "backprop": + assert info["device_name"] == "default.qubit" + assert info["num_trainable_params"] == 4 + else: + assert info["device_name"] == "default.qubit.autograd" From bb6626c2cfb1ce9f1a5c32372c772f7dfe8d7b7e Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 22:49:57 +0800 Subject: [PATCH 12/52] update --- pennylane/transforms/batch_transform.py | 60 +++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py index c41d5252cd8..d65e458492a 100644 --- a/pennylane/transforms/batch_transform.py +++ b/pennylane/transforms/batch_transform.py @@ -14,6 +14,7 @@ """Contains tools and decorators for registering batch transforms.""" # pylint: disable=too-few-public-methods import functools +import types import pennylane as qml from pennylane.new_qnode import QNode @@ -159,9 +160,60 @@ def __init__(self, transform_fn, expand_fn=None, differentiable=True): self.transform_fn = transform_fn self.expand_fn = expand_fn self.differentiable = differentiable + self.qnode_wrapper = self.default_qnode_wrapper 
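+ # By default, QNode execution is handled by ``default_qnode_wrapper``;
+ # a transform can override this via the ``custom_qnode_wrapper`` decorator below.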
functools.update_wrapper(self, transform_fn) - def qnode_execution_wrapper(self, qnode, targs, tkwargs): + def custom_qnode_wrapper(self, fn): + """Register a custom QNode execution wrapper function + for the batch transform. + + **Example** + + .. code-block:: python + + def my_transform(tape, *targs, **tkwargs): + ... + return tapes, processing_fn + + @my_transform.custom_qnode_wrapper + def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): + def wrapper_fn(*args, **kwargs): + # construct QNode + qnode.construct(args, kwargs) + # apply transform to QNode's tapes + tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs) + # execute tapes and return processed result + ... + return processing_fn(results) + return wrapper_fn + + The custom QNode execution wrapper must have arguments + ``self`` (the batch transform object), ``qnode`` (the input QNode + to transform and execute), ``targs`` and ``tkwargs`` (the transform + arguments and keyword arguments respectively). + + It should return a callable object that accepts the *same* arguments + as the QNode, and returns the transformed numerical result. + + The default :meth:`~.default_qnode_wrapper` method may be called + if only pre- or post-processing dependent on QNode arguments is required: + + .. code-block:: python + + @my_transform.custom_qnode_wrapper + def my_custom_qnode_wrapper(self, qnode, targs, tkwargs): + transformed_qnode = self.default_qnode_wrapper(qnode) + + def wrapper_fn(*args, **kwargs): + args, kwargs = pre_process(args, kwargs) + res = transformed_qnode(*args, **kwargs) + ... + return ... + return wrapper_fn + """ + self.qnode_wrapper = types.MethodType(fn, self) + + def default_qnode_wrapper(self, qnode, targs, tkwargs): """A wrapper method that takes a QNode and transform arguments, and returns a function that 'wraps' the QNode execution. 
@@ -206,10 +258,10 @@ def __call__(self, qnode, *targs, **tkwargs): # tapes, fn = some_transform(tape, *transform_args) return self.construct(qnode, *targs, **tkwargs) - if isinstance(qnode, (qml.QNode, QNode)): + if isinstance(qnode, (qml.QNode, QNode, qml.ExpvalCost)): # Input is a QNode: # result = some_transform(qnode, *transform_args)(*qnode_args) - wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) + wrapper = self.qnode_wrapper(qnode, targs, tkwargs) wrapper = functools.wraps(qnode)(wrapper) else: @@ -231,7 +283,7 @@ def __call__(self, qnode, *targs, **tkwargs): targs = (qnode,) + targs def wrapper(qnode): - _wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs) + _wrapper = self.qnode_wrapper(qnode, targs, tkwargs) _wrapper = functools.wraps(qnode)(_wrapper) return _wrapper From 377d8f30b95da669352ca124594c29c74d2bd2c5 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 23:10:24 +0800 Subject: [PATCH 13/52] update --- tests/transforms/test_metric_tensor.py | 31 +++++++++++++------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/tests/transforms/test_metric_tensor.py b/tests/transforms/test_metric_tensor.py index 3c4d60f1f96..9ece08fea73 100644 --- a/tests/transforms/test_metric_tensor.py +++ b/tests/transforms/test_metric_tensor.py @@ -19,7 +19,6 @@ from scipy.linalg import block_diag import pennylane as qml -from pennylane import QNode, qnode from gate_data import Y, Z @@ -68,7 +67,7 @@ def circuit(a, b): qml.MultiRZ(b, wires=[0, 1, 2]) return qml.expval(qml.PauliX(0)) - circuit = QNode(circuit, dev, diff_method=diff_method) + circuit = qml.QNode(circuit, dev, diff_method=diff_method) params = [0.1, 0.2] result = qml.metric_tensor(circuit)(*params) assert result.shape == (2, 2) @@ -87,7 +86,7 @@ def circuit(a): qml.RX(a, wires=0) return qml.expval(qml.PauliX(0)) - circuit = QNode(circuit, dev, diff_method=diff_method) + circuit = qml.QNode(circuit, dev, diff_method=diff_method) params = [0.1] result = qml.metric_tensor(circuit, hybrid=False)(*params) assert result.shape == (2, 2) @@ -226,7 +225,7 @@ def circuit(a, b, c): qml.PhaseShift(c, wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) - circuit = QNode(circuit, dev) + circuit = qml.QNode(circuit, dev) a = 0.432 b = 0.12 @@ -256,7 +255,7 @@ def circuit(a, b): qml.PhaseShift(b, wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) - circuit = QNode(circuit, dev) + circuit = qml.QNode(circuit, dev) a = np.array([0.432, 0.1]) b = 0.12 @@ -308,7 +307,7 @@ def final(x, y, z, h, g, f): qml.RX(h, wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2)) - final = QNode(final, dev, diff_method=request.param) + final = qml.QNode(final, dev, diff_method=request.param) return dev, final, non_parametrized_layer, a, b, c @@ -387,7 +386,7 @@ def layer2_diag(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.var(qml.PauliZ(2)), qml.var(qml.PauliY(1)) - layer2_diag = QNode(layer2_diag, dev) + layer2_diag = qml.QNode(layer2_diag, dev) def layer2_off_diag_first_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) @@ -397,7 +396,7 @@ def layer2_off_diag_first_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliY(1)) - layer2_off_diag_first_order = QNode(layer2_off_diag_first_order, dev) + layer2_off_diag_first_order = qml.QNode(layer2_off_diag_first_order, dev) def layer2_off_diag_second_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) @@ 
-407,7 +406,7 @@ def layer2_off_diag_second_order(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.expval(qml.Hermitian(np.kron(Z, Y), wires=[2, 1])) - layer2_off_diag_second_order = QNode(layer2_off_diag_second_order, dev) + layer2_off_diag_second_order = qml.QNode(layer2_off_diag_second_order, dev) # calculate the diagonal terms varK0, varK1 = layer2_diag(x, y, z, h, g, f) @@ -446,7 +445,7 @@ def layer3_diag(x, y, z, h, g, f): qml.RY(f, wires=2) return qml.var(qml.PauliX(1)) - layer3_diag = QNode(layer3_diag, dev) + layer3_diag = qml.QNode(layer3_diag, dev) G3 = layer3_diag(x, y, z, h, g, f) / 4 assert np.allclose(G[3:4, 3:4], G3, atol=tol, rtol=0) @@ -511,7 +510,7 @@ def layer2_diag(x, y, z, h, g, f): non_parametrized_layer(a, b, c) return qml.var(qml.PauliZ(2)), qml.var(qml.PauliY(1)) - layer2_diag = QNode(layer2_diag, dev) + layer2_diag = qml.QNode(layer2_diag, dev) # calculate the diagonal terms varK0, varK1 = layer2_diag(x, y, z, h, g, f) @@ -543,7 +542,7 @@ def layer3_diag(x, y, z, h, g, f): qml.RY(f, wires=2) return qml.var(qml.PauliX(1)) - layer3_diag = QNode(layer3_diag, dev) + layer3_diag = qml.QNode(layer3_diag, dev) G3 = layer3_diag(x, y, z, h, g, f) / 4 assert np.allclose(G[3:4, 3:4], G3, atol=tol, rtol=0) @@ -563,7 +562,7 @@ def test_autograd(self, diff_method, tol): """Test metric tensor differentiability in the autograd interface""" dev = qml.device("default.qubit", wires=2) - @qnode(dev, interface="autograd", diff_method=diff_method) + @qml.qnode(dev, interface="autograd", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -594,7 +593,7 @@ def test_jax(self, diff_method, tol): dev = qml.device("default.qubit.jax", wires=2) - @qnode(dev, interface="jax", diff_method="backprop") + @qml.qnode(dev, interface="jax", diff_method="backprop") def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -622,7 +621,7 @@ def test_tf(self, diff_method, tol): dev = qml.device("default.qubit", wires=2) - @qnode(dev, interface="tf", diff_method=diff_method) + @qml.qnode(dev, interface="tf", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) @@ -649,7 +648,7 @@ def test_torch(self, diff_method, tol): dev = qml.device("default.qubit", wires=2) - @qnode(dev, interface="torch", diff_method=diff_method) + @qml.qnode(dev, interface="torch", diff_method=diff_method) def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=0) From 7c400cc8b072b7e5cb69f0b9f75eabf3b22d312d Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 10 Sep 2021 23:11:20 +0800 Subject: [PATCH 14/52] remove xfail --- tests/transforms/test_metric_tensor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/transforms/test_metric_tensor.py b/tests/transforms/test_metric_tensor.py index 9ece08fea73..a513103bbe4 100644 --- a/tests/transforms/test_metric_tensor.py +++ b/tests/transforms/test_metric_tensor.py @@ -582,7 +582,6 @@ def cost(weights): ) assert np.allclose(grad, expected, atol=tol, rtol=0) - @pytest.mark.xfail def test_jax(self, diff_method, tol): """Test metric tensor differentiability in the JAX interface""" if diff_method == "parameter-shift": From abc543a2e9ed0fbc69e1db4447b31ee605c9e367 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 14:50:52 +0800 Subject: [PATCH 15/52] more tests --- tests/test_new_qnode.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/test_new_qnode.py 
b/tests/test_new_qnode.py
index 59eff9a3773..638ab0a7326 100644
--- a/tests/test_new_qnode.py
+++ b/tests/test_new_qnode.py
@@ -61,6 +61,24 @@ def circuit(x):
 with pytest.raises(qml.QuantumFunctionError, match=expected_error):
 circuit.interface = test_interface
+ def test_valid_interface(self):
+ """Test that changing to a valid interface works as expected, and the
+ diff method is updated as required."""
+ torch = pytest.importorskip("torch")
+ dev = qml.device("default.qubit", wires=1)
+
+ @qnode(dev, interface="autograd", diff_method="best")
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.probs(wires=0)
+
+ assert circuit.device.short_name == "default.qubit.autograd"
+ assert circuit.gradient_fn == "backprop"
+
+ circuit.interface = "torch"
+ assert circuit.device.short_name == "default.qubit.torch"
+ assert circuit.gradient_fn == "backprop"
+
 def test_invalid_device(self):
 """Test that an exception is raised for an invalid device"""
 with pytest.raises(qml.QuantumFunctionError, match="Invalid device"):
@@ -244,6 +262,21 @@ def test_diff_method(self, mocker):
 # check that get_best_method was only ever called once
 mock_best.assert_called_once()
+ def test_gradient_transform(self, mocker):
+ """Test passing a gradient transform directly to a QNode"""
+ dev = qml.device("default.qubit", wires=1)
+ spy = mocker.spy(qml.gradients.finite_difference, "finite_diff_coeffs")
+
+ @qnode(dev, diff_method=qml.gradients.finite_diff)
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.expval(qml.PauliZ(0))
+
+ assert circuit.gradient_fn is qml.gradients.finite_diff
+
+ qml.grad(circuit)(0.5)
+ spy.assert_called()
+
 def test_unknown_diff_method_string(self):
 """Test that an exception is raised for an unknown differentiation method string"""
 dev = qml.device("default.qubit", wires=1)
From 2a3c67b26ba6a6ed9d731a29ea535eb7574b44d2 Mon Sep 17 00:00:00 2001
From: Josh Izaac
Date: Mon, 13 Sep 2021 15:03:29 +0800
Subject: [PATCH 16/52] revert
---
 pennylane/gradients/parameter_shift.py | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py
index 6fe6ad83443..d7655420689 100644
--- a/pennylane/gradients/parameter_shift.py
+++ b/pennylane/gradients/parameter_shift.py
@@ -331,22 +331,8 @@ def var_param_shift(tape, argnum, shift=np.pi / 2, gradient_recipes=None, f0=Non
 def processing_fn(results):
 # We need to expand the dimensions of the variance mask,
 # and convert it to be the same type as the results.
- res = results[0] - ragged = getattr(results[0], "dtype", None) is np.dtype("object") - - mask = [] - for m, r in zip(var_mask, results[0]): - array_func = np.ones if m else np.zeros - shape = qml.math.shape(r) - shape = (1,) if shape == tuple() else shape - mask.append(array_func(shape, dtype=bool)) - - if ragged: - res = qml.math.hstack(res) - mask = qml.math.hstack(mask) - - mask = qml.math.convert_like(qml.math.reshape(mask, [-1, 1]), res) - f0 = qml.math.expand_dims(res, -1) + mask = qml.math.convert_like(qml.math.reshape(var_mask, [-1, 1]), results[0]) + f0 = qml.math.expand_dims(results[0], -1) pdA = pdA_fn(results[1:tape_boundary]) pdA2 = 0 From 4e54a2660b7893810a872287b83e5bc5ee537179 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 15:04:25 +0800 Subject: [PATCH 17/52] Add more tests --- tests/interfaces/test_batch_autograd.py | 32 - tests/interfaces/test_batch_autograd_qnode.py | 1238 +++++++++++++++++ tests/interfaces/test_batch_tensorflow.py | 30 - .../interfaces/test_batch_tensorflow_qnode.py | 1057 ++++++++++++++ tests/interfaces/test_batch_torch.py | 50 - tests/interfaces/test_batch_torch_qnode.py | 1099 +++++++++++++++ tests/interfaces/test_qnode_autograd.py | 16 +- tests/interfaces/test_qnode_tf.py | 15 +- tests/interfaces/test_qnode_torch.py | 15 +- 9 files changed, 3434 insertions(+), 118 deletions(-) create mode 100644 tests/interfaces/test_batch_autograd_qnode.py create mode 100644 tests/interfaces/test_batch_tensorflow_qnode.py create mode 100644 tests/interfaces/test_batch_torch_qnode.py diff --git a/tests/interfaces/test_batch_autograd.py b/tests/interfaces/test_batch_autograd.py index aca6ec91103..27f00b902c3 100644 --- a/tests/interfaces/test_batch_autograd.py +++ b/tests/interfaces/test_batch_autograd.py @@ -439,38 +439,6 @@ def cost(a, b, device): expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] assert np.allclose(res, expected, atol=tol, rtol=0) - def test_tape_no_parameters(self, execute_kwargs, tol): - """Test that a tape with no parameters is correctly - ignored during the gradient computation""" - dev = qml.device("default.qubit", wires=1) - - def cost(params): - with qml.tape.JacobianTape() as tape1: - qml.Hadamard(0) - qml.expval(qml.PauliX(0)) - - with qml.tape.JacobianTape() as tape2: - qml.RY(np.array(0.5, requires_grad=False), wires=0) - qml.expval(qml.PauliZ(0)) - - with qml.tape.JacobianTape() as tape3: - qml.RY(params[0], wires=0) - qml.RX(params[1], wires=0) - qml.expval(qml.PauliZ(0)) - - return sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) - - params = np.array([0.1, 0.2], requires_grad=True) - x, y = params - - res = cost(params) - expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) - assert np.allclose(res, expected, atol=tol, rtol=0) - - grad = qml.grad(cost)(params) - expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] - assert np.allclose(grad, expected, atol=tol, rtol=0) - def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = np.array(0.1, requires_grad=True) diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py new file mode 100644 index 00000000000..744c9f969c5 --- /dev/null +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -0,0 +1,1238 @@ +# Copyright 2018-2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Integration tests for using the autograd interface with a QNode""" +import pytest +from pennylane import numpy as np + +import pennylane as qml +from pennylane.new_qnode import qnode, QNode +from pennylane.tape import JacobianTape + +qubit_device_and_diff_method = [ + ["default.qubit", "finite-diff", "backward"], + ["default.qubit", "parameter-shift", "backward"], + ["default.qubit", "backprop", "forward"], + ["default.qubit", "adjoint", "forward"], + ["default.qubit", "adjoint", "backward"], +] + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestQNode: + """Test that using the QNode with Autograd integrates with the PennyLane stack""" + + def test_nondiff_param_unwrapping(self, dev_name, diff_method, mode, mocker): + """Test that non-differentiable parameters are correctly unwrapped + to NumPy ndarrays or floats (if 0-dimensional)""" + if diff_method != "parameter-shift": + pytest.skip("Test only supports parameter-shift") + + dev = qml.device("default.qubit", wires=1) + + @qnode(dev, interface="autograd", diff_method="parameter-shift") + def circuit(x, y): + qml.RX(x[0], wires=0) + qml.Rot(*x[1:], wires=0) + qml.RY(y[0], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array([0.1, 0.2, 0.3, 0.4], requires_grad=False) + y = np.array([0.5], requires_grad=True) + + param_data = [] + + def mock_apply(*args, **kwargs): + for op in args[0]: + param_data.extend(op.data.copy()) + + mocker.patch.object(dev, "apply", side_effect=mock_apply) + circuit(x, y) + assert param_data == [0.1, 0.2, 0.3, 0.4, 0.5] + assert not any(isinstance(p, np.tensor) for p in param_data) + + # test the jacobian works correctly + param_data = [] + qml.grad(circuit)(x, y) + assert param_data == [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5 + np.pi / 2, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5 - np.pi / 2, + ] + assert not any(isinstance(p, np.tensor) for p in param_data) + + def test_execution_no_interface(self, dev_name, diff_method, mode): + """Test execution works without an interface""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, interface=None) + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + a = np.array(0.1, requires_grad=True) + + res = circuit(a) + + assert circuit.qtape.interface == None + + # without the interface, the QNode simply returns a scalar array + assert isinstance(res, np.ndarray) + assert res.shape == tuple() + + # gradients should cause an error + with pytest.raises(TypeError, match="must be real number, not ArrayBox"): + qml.grad(circuit)(a) + + def test_execution_with_interface(self, dev_name, diff_method, mode): + """Test execution works with the interface""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, interface="autograd", diff_method=diff_method) + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + a = np.array(0.1, 
requires_grad=True) + circuit(a) + + assert circuit.interface == "autograd" + + # the tape is able to deduce trainable parameters + assert circuit.qtape.trainable_params == {0} + + # gradients should work + grad = qml.grad(circuit)(a) + assert isinstance(grad, float) + assert grad.shape == tuple() + + def test_jacobian(self, dev_name, diff_method, mode, mocker, tol): + """Test jacobian calculation""" + if diff_method == "parameter-shift": + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + elif diff_method == "finite-diff": + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = np.array(0.1, requires_grad=True) + b = np.array(0.2, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + res = circuit(a, b) + + assert circuit.qtape.trainable_params == {0, 1} + assert res.shape == (2,) + + expected = [np.cos(a), -np.cos(a) * np.sin(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.jacobian(circuit)(a, b) + expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] + assert np.allclose(res, expected, atol=tol, rtol=0) + + if diff_method in ("parameter-shift", "finite-diff"): + spy.assert_called() + + def test_jacobian_no_evaluate(self, dev_name, diff_method, mode, mocker, tol): + """Test jacobian calculation when no prior circuit evaluation has been performed""" + if diff_method == "parameter-shift": + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + elif diff_method == "finite-diff": + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = np.array(0.1, requires_grad=True) + b = np.array(0.2, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + jac_fn = qml.jacobian(circuit) + res = jac_fn(a, b) + expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] + assert np.allclose(res, expected, atol=tol, rtol=0) + + if diff_method in ("parameter-shift", "finite-diff"): + spy.assert_called() + + # call the Jacobian with new parameters + a = np.array(0.6, requires_grad=True) + b = np.array(0.832, requires_grad=True) + + res = jac_fn(a, b) + expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_jacobian_options(self, dev_name, diff_method, mode, mocker, tol): + """Test setting jacobian options""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = np.array([0.1, 0.2], requires_grad=True) + + dev = qml.device("default.qubit", wires=1) + + @qnode(dev, interface="autograd", h=1e-8, order=2) + def circuit(a): + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + qml.jacobian(circuit)(a) + + for args in spy.call_args_list: + assert args[1]["order"] == 2 + assert args[1]["h"] == 1e-8 + + def test_changing_trainability(self, dev_name, diff_method, mode, mocker, tol): + """Test changing the trainability of parameters changes the + number of differentiation requests made""" + if diff_method != "parameter-shift": + 
pytest.skip("Test only supports parameter-shift") + + a = np.array(0.1, requires_grad=True) + b = np.array(0.2, requires_grad=True) + + dev = qml.device("default.qubit", wires=2) + + @qnode(dev, interface="autograd", diff_method="parameter-shift") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) + + def loss(a, b): + return np.sum(circuit(a, b)) + + grad_fn = qml.grad(loss) + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + res = grad_fn(a, b) + + # the tape has reported both arguments as trainable + assert circuit.qtape.trainable_params == {0, 1} + + expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + # The parameter-shift rule has been called for each argument + assert len(spy.spy_return[0]) == 4 + + # make the second QNode argument a constant + a = np.array(0.54, requires_grad=True) + b = np.array(0.8, requires_grad=False) + + res = grad_fn(a, b) + + # the tape has reported only the first argument as trainable + assert circuit.qtape.trainable_params == {0} + + expected = [-np.sin(a) + np.sin(a) * np.sin(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + # The parameter-shift rule has been called only once + assert len(spy.spy_return[0]) == 2 + + # trainability also updates on evaluation + a = np.array(0.54, requires_grad=False) + b = np.array(0.8, requires_grad=True) + circuit(a, b) + assert circuit.qtape.trainable_params == {1} + + def test_classical_processing(self, dev_name, diff_method, mode, tol): + """Test classical processing within the quantum tape""" + a = np.array(0.1, requires_grad=True) + b = np.array(0.2, requires_grad=False) + c = np.array(0.3, requires_grad=True) + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(a, b, c): + qml.RY(a * c, wires=0) + qml.RZ(b, wires=0) + qml.RX(c + c ** 2 + np.sin(a), wires=0) + return qml.expval(qml.PauliZ(0)) + + res = qml.jacobian(circuit)(a, b, c) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {0, 2} + tape_params = np.array(circuit.qtape.get_parameters()) + assert np.all(tape_params == [a * c, c + c ** 2 + np.sin(a)]) + + assert res.shape == (2,) + + def test_no_trainable_parameters(self, dev_name, diff_method, mode, tol): + """Test evaluation and Jacobian if there are no trainable parameters""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=0) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) + + a = np.array(0.1, requires_grad=False) + b = np.array(0.2, requires_grad=False) + + res = circuit(a, b) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == set() + + assert res.shape == (2,) + assert isinstance(res, np.ndarray) + + assert not qml.jacobian(circuit)(a, b) + + def cost(a, b): + return np.sum(circuit(a, b)) + + with pytest.warns(UserWarning, match="Output seems independent of input"): + grad = qml.grad(cost)(a, b) + + assert grad == tuple() + + def test_matrix_parameter(self, dev_name, diff_method, mode, tol): + """Test that the autograd interface works correctly + with a matrix parameter""" + U = np.array([[0, 1], [1, 0]], requires_grad=False) + a = np.array(0.1, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + 
@qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(U, a): + qml.QubitUnitary(U, wires=0) + qml.RY(a, wires=0) + return qml.expval(qml.PauliZ(0)) + + res = circuit(U, a) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1} + + res = qml.grad(circuit)(U, a) + assert np.allclose(res, np.sin(a), atol=tol, rtol=0) + + def test_gradient_non_differentiable_exception(self, dev_name, diff_method, mode): + """Test that an exception is raised if non-differentiable data is + differentiated""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="autograd", diff_method=diff_method) + def circuit(data1): + qml.templates.AmplitudeEmbedding(data1, wires=[0, 1]) + return qml.expval(qml.PauliZ(0)) + + grad_fn = qml.grad(circuit, argnum=0) + data1 = np.array([0, 1, 1, 0], requires_grad=False) / np.sqrt(2) + + with pytest.raises(qml.numpy.NonDifferentiableError, match="is non-differentiable"): + grad_fn(data1) + + @pytest.mark.xfail + def test_differentiable_expand(self, dev_name, diff_method, mode, tol): + """Test that operation and nested tapes expansion + is differentiable""" + + class U3(qml.U3): + def expand(self): + theta, phi, lam = self.data + wires = self.wires + + with JacobianTape() as tape: + qml.Rot(lam, theta, -lam, wires=wires) + qml.PhaseShift(phi + lam, wires=wires) + + return tape + + dev = qml.device(dev_name, wires=1) + a = np.array(0.1, requires_grad=False) + p = np.array([0.1, 0.2, 0.3], requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(a, p): + qml.RX(a, wires=0) + U3(p[0], p[1], p[2], wires=0) + return qml.expval(qml.PauliX(0)) + + res = circuit(a, p) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. + assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + + assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + # In backprop mode, all parameters are returned. 
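+            # After expansion, U3(theta, phi, lam) becomes Rot(lam, theta, -lam)
+            # followed by PhaseShift(phi + lam), which is why the tape parameters
+            # checked below are [a, p[2], p[0], -p[2], p[1] + p[2]] (with `a`
+            # included, since backprop keeps every parameter on the tape).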
+ assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + + expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * ( + np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2]) + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.grad(circuit)(a, p) + expected = np.array( + [ + np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])), + np.cos(p[1]) * np.cos(p[2]) * np.sin(a) + - np.sin(p[1]) + * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])), + np.sin(a) + * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])), + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + +class TestShotsIntegration: + """Test that the QNode correctly changes shot value, and + differentiates it.""" + + def test_changing_shots(self, mocker, tol): + """Test that changing shots works on execution""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = np.array([0.543, -0.654], requires_grad=True) + + @qnode(dev, diff_method=qml.gradients.param_shift) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + spy = mocker.spy(dev, "sample") + + # execute with device default shots (None) + res = circuit(a, b) + assert np.allclose(res, -np.cos(a) * np.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + # execute with shots=100 + res = circuit(a, b, shots=100) + spy.assert_called() + assert spy.spy_return.shape == (100,) + + # device state has been unaffected + assert dev.shots is None + spy = mocker.spy(dev, "sample") + res = circuit(a, b) + assert np.allclose(res, -np.cos(a) * np.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + def test_gradient_integration(self, tol): + """Test that temporarily setting the shots works + for gradient computations""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = np.array([0.543, -0.654], requires_grad=True) + + @qnode(dev, diff_method=qml.gradients.param_shift) + def cost_fn(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + res = qml.jacobian(cost_fn)(a, b, shots=[10000, 10000, 10000]) + assert dev.shots is None + assert len(res) == 3 + + expected = [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)] + assert np.allclose(np.mean(res, axis=0), expected, atol=0.1, rtol=0) + + def test_update_diff_method(self, mocker, tol): + """Test that temporarily setting the shots updates the diff method""" + dev = qml.device("default.qubit", wires=2, shots=100) + a, b = np.array([0.543, -0.654], requires_grad=True) + + spy = mocker.spy(qml, "execute") + + @qnode(dev) + def cost_fn(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + # since we are using finite shots, parameter-shift will + # be chosen + assert cost_fn.gradient_fn is qml.gradients.param_shift + + cost_fn(a, b) + assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift + + # if we set the shots to None, backprop can now be used + cost_fn(a, b, shots=None) + assert spy.call_args[1]["gradient_fn"] == "backprop" + + # original QNode settings are unaffected + assert cost_fn.gradient_fn is qml.gradients.param_shift + cost_fn(a, b) + assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestQubitIntegration: + """Tests that ensure various 
qubit circuits integrate correctly""" + + def test_probability_differentiation(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with a single prob output""" + + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = np.array(0.543, requires_grad=True) + y = np.array(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.probs(wires=[1]) + + res = qml.jacobian(circuit)(x, y) + + expected = np.array( + [ + [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2], + [np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_multiple_probability_differentiation(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with multiple prob outputs""" + + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = np.array(0.543, requires_grad=True) + y = np.array(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.probs(wires=[0]), qml.probs(wires=[1]) + + res = circuit(x, y) + + expected = np.array( + [ + [np.cos(x / 2) ** 2, np.sin(x / 2) ** 2], + [(1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.jacobian(circuit)(x, y) + expected = np.array( + [ + [[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]], + [ + [np.sin(x) / 2, 0], + [np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2], + ], + ] + ) + + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_ragged_differentiation(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with prob and expval outputs""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = np.array(0.543, requires_grad=True) + y = np.array(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.probs(wires=[1])] + + res = circuit(x, y) + + expected = np.array( + [np.cos(x), (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.jacobian(circuit)(x, y) + expected = np.array( + [ + [-np.sin(x), 0], + [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2], + [np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_ragged_differentiation_variance(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with prob and variance outputs""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = np.array(0.543, requires_grad=True) + y = 
np.array(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return [qml.var(qml.PauliZ(0)), qml.probs(wires=[1])] + + res = circuit(x, y) + + expected = np.array( + [np.sin(x) ** 2, (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.jacobian(circuit)(x, y) + expected = np.array( + [ + [2 * np.cos(x) * np.sin(x), 0], + [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2], + [np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_sampling(self, dev_name, diff_method, mode): + """Test sampling works as expected""" + if mode == "forward": + pytest.skip("Sampling not possible with forward mode differentiation.") + + dev = qml.device(dev_name, wires=2, shots=10) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(): + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + return [qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))] + + res = circuit() + + assert res.shape == (2, 10) + assert isinstance(res, np.ndarray) + + @pytest.mark.xfail + def test_chained_qnodes(self, dev_name, diff_method, mode): + """Test that the gradient of chained QNodes works without error""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="autograd", diff_method=diff_method) + def circuit1(weights): + qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) + + @qnode(dev, interface="autograd", diff_method=diff_method) + def circuit2(data, weights): + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + return qml.expval(qml.PauliX(0)) + + def cost(weights): + w1, w2 = weights + c1 = circuit1(w1) + c2 = circuit2(c1, w2) + return np.sum(c2) ** 2 + + w1 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=3) + w2 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=4) + + weights = [w1, w2] + + grad_fn = qml.grad(cost) + res = grad_fn(weights) + + assert len(res) == 2 + + def test_chained_gradient_value(self, dev_name, diff_method, mode, tol): + """Test that the returned gradient value for two chained qubit QNodes + is correct.""" + dev1 = qml.device(dev_name, wires=3) + + @qnode(dev1, diff_method=diff_method) + def circuit1(a, b, c): + qml.RX(a, wires=0) + qml.RX(b, wires=1) + qml.RX(c, wires=2) + qml.CNOT(wires=[0, 1]) + qml.CNOT(wires=[1, 2]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(2)) + + dev2 = qml.device("default.qubit", wires=2) + + @qnode(dev2, diff_method=diff_method) + def circuit2(data, weights): + qml.RX(data[0], wires=0) + qml.RX(data[1], wires=1) + qml.CNOT(wires=[0, 1]) + qml.RZ(weights[0], wires=0) + qml.RZ(weights[1], wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliX(0) @ qml.PauliY(1)) + + def cost(a, b, c, weights): + return circuit2(circuit1(a, b, c), weights) + + grad_fn = qml.grad(cost) + + # Set the first parameter of circuit1 as non-differentiable. + a = np.array(0.4, requires_grad=False) + + # The remaining free parameters are all differentiable. + b = 0.5 + c = 0.1 + weights = np.array([0.2, 0.3]) + + res = grad_fn(a, b, c, weights) + + # Output should have shape [dcost/db, dcost/dc, dcost/dw], + # where b,c are scalars, and w is a vector of length 2. 
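+        # (`a` was created with requires_grad=False, so no dcost/da entry is
+        # returned; only b, c, and the weights are differentiated.)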
+ assert len(res) == 3 + assert res[0].shape == tuple() # scalar + assert res[1].shape == tuple() # scalar + assert res[2].shape == (2,) # vector + + cacbsc = np.cos(a) * np.cos(b) * np.sin(c) + + expected = np.array( + [ + # analytic expression for dcost/db + -np.cos(a) + * np.sin(b) + * np.sin(c) + * np.cos(cacbsc) + * np.sin(weights[0]) + * np.sin(np.cos(a)), + # analytic expression for dcost/dc + np.cos(a) + * np.cos(b) + * np.cos(c) + * np.cos(cacbsc) + * np.sin(weights[0]) + * np.sin(np.cos(a)), + # analytic expression for dcost/dw[0] + np.sin(cacbsc) * np.cos(weights[0]) * np.sin(np.cos(a)), + # analytic expression for dcost/dw[1] + 0, + ] + ) + + # np.hstack 'flattens' the ragged gradient array allowing it + # to be compared with the expected result + assert np.allclose(np.hstack(res), expected, atol=tol, rtol=0) + + if diff_method != "backprop": + # Check that the gradient was computed + # for all parameters in circuit2 + assert circuit2.qtape.trainable_params == {0, 1, 2, 3} + + # Check that the parameter-shift rule was not applied + # to the first parameter of circuit1. + assert circuit1.qtape.trainable_params == {1, 2} + + def test_second_derivative(self, dev_name, diff_method, mode, tol): + """Test second derivative calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array([1.0, 2.0], requires_grad=True) + res = circuit(x) + g = qml.grad(circuit)(x) + g2 = qml.grad(lambda x: np.sum(qml.grad(circuit)(x)))(x) + + a, b = x + + expected_res = np.cos(a) * np.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_g2 = [ + -np.cos(a) * np.cos(b) + np.sin(a) * np.sin(b), + np.sin(a) * np.sin(b) - np.cos(a) * np.cos(b), + ] + assert np.allclose(g2, expected_g2, atol=tol, rtol=0) + + def test_hessian(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array([1.0, 2.0], requires_grad=True) + res = circuit(x) + + a, b = x + + expected_res = np.cos(a) * np.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + grad_fn = qml.grad(circuit) + g = grad_fn(x) + + expected_g = [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + hess = qml.jacobian(grad_fn)(x) + + expected_hess = [ + [-np.cos(a) * np.cos(b), np.sin(a) * np.sin(b)], + [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_unused_parameter(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + 
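+        # x[1] never enters the circuit below, so both its gradient entry and
+        # the corresponding Hessian row and column are expected to vanish.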
@qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RY(x[0], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array([1.0, 2.0], requires_grad=True) + res = circuit(x) + + a, b = x + + expected_res = np.cos(a) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + grad_fn = qml.grad(circuit) + g = grad_fn(x) + + expected_g = [-np.sin(a), 0] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + hess = qml.jacobian(grad_fn)(x) + + expected_hess = [ + [-np.cos(a), 0], + [0, 0], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.probs(wires=0) + + x = np.array([1.0, 2.0], requires_grad=True) + res = circuit(x) + + a, b = x + + expected_res = [0.5 + 0.5 * np.cos(a) * np.cos(b), 0.5 - 0.5 * np.cos(a) * np.cos(b)] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + jac_fn = qml.jacobian(circuit) + g = jac_fn(x) + + expected_g = [ + [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)], + [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)], + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + hess = qml.jacobian(jac_fn)(x) + + expected_hess = [ + [ + [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)], + [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)], + ], + [ + [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)], + ], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued_postprocessing(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode with post-processing""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RX(x[0], wires=0) + qml.RY(x[1], wires=0) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(0))] + + def cost_fn(x): + return x @ circuit(x) + + x = np.array([0.76, -0.87], requires_grad=True) + res = cost_fn(x) + + a, b = x + + expected_res = x @ [np.cos(a) * np.cos(b), np.cos(a) * np.cos(b)] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + grad_fn = qml.grad(cost_fn) + g = grad_fn(x) + + expected_g = [ + np.cos(b) * (np.cos(a) - (a + b) * np.sin(a)), + np.cos(a) * (np.cos(b) - (a + b) * np.sin(b)), + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + hess = qml.jacobian(grad_fn)(x) + + expected_hess = [ + [ + -(np.cos(b) * ((a + b) * np.cos(a) + 2 * np.sin(a))), + -(np.cos(b) * np.sin(a)) + (-np.cos(a) + (a + b) * np.sin(a)) * np.sin(b), + ], + [ + -(np.cos(b) * np.sin(a)) + (-np.cos(a) + (a + b) * np.sin(a)) * np.sin(b), + -(np.cos(a) * ((a + b) * np.cos(b) + 2 * np.sin(b))), + ], + ] + + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued_separate_args(self, dev_name, diff_method, mode, mocker, tol): + """Test hessian calculation of a vector valued QNode that 
has separate input arguments""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=0) + return qml.probs(wires=0) + + a = np.array(1.0, requires_grad=True) + b = np.array(2.0, requires_grad=True) + res = circuit(a, b) + + expected_res = [0.5 + 0.5 * np.cos(a) * np.cos(b), 0.5 - 0.5 * np.cos(a) * np.cos(b)] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + jac_fn = qml.jacobian(circuit) + g = jac_fn(a, b) + + expected_g = [ + [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)], + [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)], + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + hess = qml.jacobian(jac_fn)(a, b) + + if diff_method == "backprop": + spy.assert_not_called() + elif diff_method == "parameter-shift": + spy.assert_called() + + expected_hess = [ + [ + [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)], + [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * np.sin(b)], + ], + [ + [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)], + [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)], + ], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_ragged(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a ragged QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode, max_diff=2) + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + qml.RY(x[0], wires=1) + qml.RX(x[1], wires=1) + return qml.expval(qml.PauliZ(0)), qml.probs(wires=1) + + x = np.array([1.0, 2.0], requires_grad=True) + res = circuit(x) + + a, b = x + + expected_res = [ + np.cos(a) * np.cos(b), + 0.5 + 0.5 * np.cos(a) * np.cos(b), + 0.5 - 0.5 * np.cos(a) * np.cos(b), + ] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + jac_fn = qml.jacobian(circuit) + g = jac_fn(x) + + expected_g = [ + [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)], + [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)], + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + hess = qml.jacobian(jac_fn)(x) + expected_hess = [ + [ + [-np.cos(a) * np.cos(b), np.sin(a) * np.sin(b)], + [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)], + ], + [ + [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)], + [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)], + ], + [ + [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)], + ], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_state(self, dev_name, diff_method, mode, tol): + """Test that the state can be returned and differentiated""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support states") + + dev = qml.device(dev_name, wires=2) + + x = np.array(0.543, requires_grad=True) + y = np.array(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 
1]) + return qml.state() + + def cost_fn(x, y): + res = circuit(x, y) + assert res.dtype is np.dtype("complex128") + probs = np.abs(res) ** 2 + return probs[0] + probs[2] + + res = cost_fn(x, y) + + if diff_method not in {"backprop"}: + pytest.skip("Test only supports backprop") + + res = qml.jacobian(cost_fn)(x, y) + expected = np.array([-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_projector(self, dev_name, diff_method, mode, tol): + """Test that the variance of a projector is correctly returned""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support projectors") + + dev = qml.device(dev_name, wires=2) + P = np.array([1], requires_grad=False) + x, y = 0.765, -0.654 + + @qnode(dev, diff_method=diff_method, interface="autograd", mode=mode) + def circuit(x, y): + qml.RX(x, wires=0) + qml.RY(y, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.var(qml.Projector(P, wires=0) @ qml.PauliX(1)) + + res = circuit(x, y) + expected = 0.25 * np.sin(x / 2) ** 2 * (3 + np.cos(2 * y) + 2 * np.cos(x) * np.sin(y) ** 2) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.jacobian(circuit)(x, y) + expected = np.array( + [ + [ + 0.5 * np.sin(x) * (np.cos(x / 2) ** 2 + np.cos(2 * y) * np.sin(x / 2) ** 2), + -2 * np.cos(y) * np.sin(x / 2) ** 4 * np.sin(y), + ] + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + +@pytest.mark.parametrize( + "diff_method,kwargs", + [["finite-diff", {}], ("parameter-shift", {}), ("parameter-shift", {"force_order2": True})], +) +class TestCV: + """Tests for CV integration""" + + def test_first_order_observable(self, diff_method, kwargs, tol): + """Test variance of a first order CV observable""" + dev = qml.device("default.gaussian", wires=1) + + r = 0.543 + phi = -0.654 + + @qnode(dev, diff_method=diff_method, **kwargs) + def circuit(r, phi): + qml.Squeezing(r, 0, wires=0) + qml.Rotation(phi, wires=0) + return qml.var(qml.X(0)) + + res = circuit(r, phi) + expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2 + assert np.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + res = qml.jacobian(circuit)(r, phi) + expected = np.array( + [ + [ + 2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2, + 2 * np.sinh(2 * r) * np.sin(2 * phi), + ] + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_second_order_observable(self, diff_method, kwargs, tol): + """Test variance of a second order CV expectation value""" + dev = qml.device("default.gaussian", wires=1) + + n = 0.12 + a = 0.765 + + @qnode(dev, diff_method=diff_method, **kwargs) + def circuit(n, a): + qml.ThermalState(n, wires=0) + qml.Displacement(a, 0, wires=0) + return qml.var(qml.NumberOperator(0)) + + res = circuit(n, a) + expected = n ** 2 + n + np.abs(a) ** 2 * (1 + 2 * n) + assert np.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + res = qml.jacobian(circuit)(n, a) + expected = np.array([[2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)]]) + assert np.allclose(res, expected, atol=tol, rtol=0) + + +def test_adjoint_reuse_device_state(mocker): + """Tests that the autograd interface reuses the device state for adjoint differentiation""" + dev = qml.device("default.qubit", wires=1) + + @qnode(dev, diff_method="adjoint") + def circ(x): + qml.RX(x, wires=0) + return qml.expval(qml.PauliZ(0)) + + spy = mocker.spy(dev, "adjoint_jacobian") + + grad = qml.grad(circ)(1.0) + assert circ.device.num_executions == 1 + + 
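+    # Only one device execution should occur for both the forward pass and
+    # the gradient: with use_device_state=True, adjoint_jacobian starts from
+    # the state already on the device rather than re-simulating the circuit.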
spy.assert_called_with(mocker.ANY, use_device_state=True) diff --git a/tests/interfaces/test_batch_tensorflow.py b/tests/interfaces/test_batch_tensorflow.py index 948d0a858f2..9b2969f9b8e 100644 --- a/tests/interfaces/test_batch_tensorflow.py +++ b/tests/interfaces/test_batch_tensorflow.py @@ -367,36 +367,6 @@ def test_jacobian(self, execute_kwargs, tol): expected = [[-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)]] assert np.allclose(expected, [agrad, bgrad], atol=tol, rtol=0) - def test_tape_no_parameters(self, execute_kwargs, tol): - """Test that a tape with no parameters is correctly - ignored during the gradient computation""" - dev = qml.device("default.qubit", wires=1) - params = tf.Variable([0.1, 0.2], dtype=tf.float64) - x, y = 1.0 * params - - with tf.GradientTape() as t: - with qml.tape.JacobianTape() as tape1: - qml.Hadamard(0) - qml.expval(qml.PauliX(0)) - - with qml.tape.JacobianTape() as tape2: - qml.RY(0.5, wires=0) - qml.expval(qml.PauliZ(0)) - - with qml.tape.JacobianTape() as tape3: - qml.RY(params[0], wires=0) - qml.RX(params[1], wires=0) - qml.expval(qml.PauliZ(0)) - - res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) - - expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) - assert np.allclose(res, expected, atol=tol, rtol=0) - - grad = t.gradient(res, params) - expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] - assert np.allclose(grad, expected, atol=tol, rtol=0) - def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = tf.Variable(0.1, dtype=tf.float64) diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py new file mode 100644 index 00000000000..a23ca190bee --- /dev/null +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -0,0 +1,1057 @@ +# Copyright 2018-2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
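+# Illustrative usage sketch only (not itself a test): the pattern exercised
+# throughout this file is a TF-interfaced QNode differentiated with
+# tf.GradientTape, e.g.
+#
+#     dev = qml.device("default.qubit", wires=1)
+#
+#     @qnode(dev, interface="tf", diff_method="parameter-shift")
+#     def circuit(a):
+#         qml.RY(a, wires=0)
+#         return qml.expval(qml.PauliZ(0))
+#
+#     a = tf.Variable(0.1, dtype=tf.float64)
+#     with tf.GradientTape() as tape:
+#         res = circuit(a)
+#     grad = tape.gradient(res, a)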
+"""Integration tests for using the TensorFlow interface with a QNode""" +import pytest +import numpy as np + +tf = pytest.importorskip("tensorflow") + +import pennylane as qml +from pennylane.new_qnode import qnode, QNode +from pennylane.tape import JacobianTape + + +qubit_device_and_diff_method = [ + ["default.qubit", "finite-diff", "backward"], + ["default.qubit", "parameter-shift", "backward"], + ["default.qubit", "backprop", "forward"], + ["default.qubit", "adjoint", "forward"], + ["default.qubit", "adjoint", "backward"], +] + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestQNode: + """Test that using the QNode with TensorFlow integrates with the PennyLane stack""" + + def test_execution_with_interface(self, dev_name, diff_method, mode): + """Test execution works with the interface""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, interface="tf", diff_method=diff_method, mode=mode) + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + a = tf.Variable(0.1) + circuit(a) + + # if executing outside a gradient tape, the number of trainable parameters + # cannot be determined by TensorFlow + assert circuit.qtape.trainable_params == set() + + with tf.GradientTape() as tape: + res = circuit(a) + + assert circuit.interface == "tf" + + # with the interface, the tape returns tensorflow tensors + assert isinstance(res, tf.Tensor) + assert res.shape == tuple() + + # the tape is able to deduce trainable parameters + assert circuit.qtape.trainable_params == {0} + + # gradients should work + grad = tape.gradient(res, a) + assert isinstance(grad, tf.Tensor) + assert grad.shape == tuple() + + def test_interface_swap(self, dev_name, diff_method, mode, tol): + """Test that the TF interface can be applied to a QNode + with a pre-existing interface""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, interface="autograd", diff_method=diff_method, mode=mode) + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + from pennylane import numpy as anp + + a = anp.array(0.1, requires_grad=True) + + res1 = circuit(a) + grad_fn = qml.grad(circuit) + grad1 = grad_fn(a) + + # switch to TF interface + circuit.interface = "tf" + + a = tf.Variable(0.1, dtype=tf.float64) + + with tf.GradientTape() as tape: + res2 = circuit(a) + + grad2 = tape.gradient(res2, a) + assert np.allclose(res1, res2, atol=tol, rtol=0) + assert np.allclose(grad1, grad2, atol=tol, rtol=0) + + def test_drawing(self, dev_name, diff_method, mode): + """Test circuit drawing when using the TF interface""" + + x = tf.Variable(0.1, dtype=tf.float64) + y = tf.Variable([0.2, 0.3], dtype=tf.float64) + z = tf.Variable(0.4, dtype=tf.float64) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="tf", diff_method=diff_method, mode=mode) + def circuit(p1, p2=y, **kwargs): + qml.RX(p1, wires=0) + qml.RY(p2[0] * p2[1], wires=1) + qml.RX(kwargs["p3"], wires=0) + qml.CNOT(wires=[0, 1]) + return qml.state() + + result = qml.draw(circuit)(p1=x, p3=z) + expected = """\ + 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ State + 1: ──RY(0.06)───────────╰X──╰┤ State +""" + assert result == expected + + def test_jacobian(self, dev_name, diff_method, mode, mocker, tol): + """Test jacobian calculation""" + if diff_method == "parameter-shift": + spy = 
mocker.spy(qml.gradients.param_shift, "transform_fn") + elif diff_method == "finite-diff": + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = tf.Variable(0.1, dtype=tf.float64) + b = tf.Variable(0.2, dtype=tf.float64) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + with tf.GradientTape() as tape: + res = circuit(a, b) + + assert circuit.qtape.trainable_params == {0, 1} + + assert isinstance(res, tf.Tensor) + assert res.shape == (2,) + + expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = tape.jacobian(res, [a, b]) + expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]] + assert np.allclose(res, expected, atol=tol, rtol=0) + + if diff_method in ("parameter-shift", "finite-diff"): + spy.assert_called() + + @pytest.mark.xfail + def test_jacobian_dtype(self, dev_name, diff_method, mode, tol): + """Test calculating the jacobian with a different datatype""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + a = tf.Variable(0.1, dtype=tf.float32) + b = tf.Variable(0.2, dtype=tf.float32) + + dev = qml.device("default.qubit", wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + circuit.to_tf(dtype=tf.float32) + assert circuit.dtype is tf.float32 + + with tf.GradientTape() as tape: + res = circuit(a, b) + + assert circuit.qtape.interface == "tf" + assert circuit.qtape.trainable_params == {0, 1} + + assert isinstance(res, tf.Tensor) + assert res.shape == (2,) + assert res.dtype is tf.float32 + + res = tape.jacobian(res, [a, b]) + assert [r.dtype is tf.float32 for r in res] + + def test_jacobian_options(self, dev_name, diff_method, mode, mocker, tol): + """Test setting finite-difference jacobian options""" + if diff_method != "finite-diff": + pytest.skip("Test only works with finite diff") + + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = tf.Variable([0.1, 0.2]) + + dev = qml.device("default.qubit", wires=1) + + @qnode(dev, interface="tf", h=1e-8, approx_order=2, diff_method=diff_method, mode=mode) + def circuit(a): + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + with tf.GradientTape() as tape: + res = circuit(a) + + tape.jacobian(res, a) + + for args in spy.call_args_list: + assert args[1]["approx_order"] == 2 + assert args[1]["h"] == 1e-8 + + def test_changing_trainability(self, dev_name, diff_method, mode, mocker, tol): + """Test changing the trainability of parameters changes the + number of differentiation requests made""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + a = tf.Variable(0.1, dtype=tf.float64) + b = tf.Variable(0.2, dtype=tf.float64) + + dev = qml.device("default.qubit", wires=2) + + @qnode(dev, interface="tf", diff_method="parameter-shift") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) + + with tf.GradientTape() as tape: + res = circuit(a, b) + + # the tape has reported both gate arguments as trainable + assert circuit.qtape.trainable_params == {0, 1} + + expected = [tf.cos(a), 
-tf.cos(a) * tf.sin(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + + jac = tape.jacobian(res, [a, b]) + expected = [ + [-tf.sin(a), tf.sin(a) * tf.sin(b)], + [0, -tf.cos(a) * tf.cos(b)], + ] + assert np.allclose(jac, expected, atol=tol, rtol=0) + + # The parameter-shift rule has been called for each argument + assert len(spy.spy_return[0]) == 4 + + # make the second QNode argument a constant + a = tf.Variable(0.54, dtype=tf.float64) + b = tf.constant(0.8, dtype=tf.float64) + + with tf.GradientTape() as tape: + res = circuit(a, b) + + # the tape has reported only the first argument as trainable + assert circuit.qtape.trainable_params == {0} + + expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)] + assert np.allclose(res, expected, atol=tol, rtol=0) + + spy.call_args_list = [] + jac = tape.jacobian(res, a) + expected = [-tf.sin(a), tf.sin(a) * tf.sin(b)] + assert np.allclose(jac, expected, atol=tol, rtol=0) + + # the gradient transform has only been called once + assert len(spy.call_args_list) == 1 + + def test_classical_processing(self, dev_name, diff_method, mode, tol): + """Test classical processing within the quantum tape""" + a = tf.Variable(0.1, dtype=tf.float64) + b = tf.constant(0.2, dtype=tf.float64) + c = tf.Variable(0.3, dtype=tf.float64) + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(x, y, z): + qml.RY(x * z, wires=0) + qml.RZ(y, wires=0) + qml.RX(z + z ** 2 + tf.sin(a), wires=0) + return qml.expval(qml.PauliZ(0)) + + with tf.GradientTape() as tape: + res = circuit(a, b, c) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {0, 2} + assert circuit.qtape.get_parameters() == [a * c, c + c ** 2 + tf.sin(a)] + + res = tape.jacobian(res, [a, b, c]) + + assert isinstance(res[0], tf.Tensor) + assert res[1] is None + assert isinstance(res[2], tf.Tensor) + + def test_no_trainable_parameters(self, dev_name, diff_method, mode, tol): + """Test evaluation if there are no trainable parameters""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=0) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) + + a = 0.1 + b = tf.constant(0.2, dtype=tf.float64) + + with tf.GradientTape() as tape: + res = circuit(a, b) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == set() + + assert res.shape == (2,) + assert isinstance(res, tf.Tensor) + + @pytest.mark.parametrize("U", [tf.constant([[0, 1], [1, 0]]), np.array([[0, 1], [1, 0]])]) + def test_matrix_parameter(self, dev_name, diff_method, mode, U, tol): + """Test that the TF interface works correctly + with a matrix parameter""" + a = tf.Variable(0.1, dtype=tf.float64) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(U, a): + qml.QubitUnitary(U, wires=0) + qml.RY(a, wires=0) + return qml.expval(qml.PauliZ(0)) + + with tf.GradientTape() as tape: + res = circuit(U, a) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1} + + assert np.allclose(res, -tf.cos(a), atol=tol, rtol=0) + + res = tape.jacobian(res, a) + assert np.allclose(res, tf.sin(a), atol=tol, rtol=0) + + @pytest.mark.xfail + def test_differentiable_expand(self, dev_name, diff_method, mode, tol): + """Test that operation and nested tapes 
expansion + is differentiable""" + + class U3(qml.U3): + def expand(self): + theta, phi, lam = self.data + wires = self.wires + + with JacobianTape() as tape: + qml.Rot(lam, theta, -lam, wires=wires) + qml.PhaseShift(phi + lam, wires=wires) + + return tape + + dev = qml.device(dev_name, wires=1) + a = np.array(0.1) + p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(a, p): + qml.RX(a, wires=0) + U3(p[0], p[1], p[2], wires=0) + return qml.expval(qml.PauliX(0)) + + with tf.GradientTape() as tape: + res = circuit(a, p) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. + assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + + assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + + expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * ( + tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2]) + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = tape.jacobian(res, p) + expected = np.array( + [ + tf.cos(p[1]) * (tf.cos(a) * tf.cos(p[0]) - tf.sin(a) * tf.sin(p[0]) * tf.sin(p[2])), + tf.cos(p[1]) * tf.cos(p[2]) * tf.sin(a) + - tf.sin(p[1]) + * (tf.cos(a) * tf.sin(p[0]) + tf.cos(p[0]) * tf.sin(a) * tf.sin(p[2])), + tf.sin(a) + * (tf.cos(p[0]) * tf.cos(p[1]) * tf.cos(p[2]) - tf.sin(p[1]) * tf.sin(p[2])), + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + +class TestShotsIntegration: + """Test that the QNode correctly changes shot value, and + differentiates it.""" + + def test_changing_shots(self, mocker, tol): + """Test that changing shots works on execution""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = [0.543, -0.654] + weights = tf.Variable([a, b], dtype=tf.float64) + + @qnode(dev, interface="tf", diff_method=qml.gradients.param_shift) + def circuit(weights): + qml.RY(weights[0], wires=0) + qml.RX(weights[1], wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + spy = mocker.spy(dev, "sample") + + # execute with device default shots (None) + res = circuit(weights) + assert np.allclose(res, -np.cos(a) * np.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + # execute with shots=100 + res = circuit(weights, shots=100) + spy.assert_called() + assert spy.spy_return.shape == (100,) + + # device state has been unaffected + assert dev.shots is None + spy = mocker.spy(dev, "sample") + res = circuit(weights) + assert np.allclose(res, -np.cos(a) * np.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + def test_gradient_integration(self, tol): + """Test that temporarily setting the shots works + for gradient computations""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = [0.543, -0.654] + weights = tf.Variable([a, b], dtype=tf.float64) + + @qnode(dev, interface="tf", diff_method=qml.gradients.param_shift) + def circuit(weights): + qml.RY(weights[0], wires=0) + qml.RX(weights[1], wires=1) + 
+            qml.CNOT(wires=[0, 1])
+            return qml.expval(qml.PauliY(1))
+
+        with tf.GradientTape() as tape:
+            res = circuit(weights, shots=[10000, 10000, 10000])
+            res = tf.transpose(tf.stack(res))
+
+        assert dev.shots is None
+        assert len(res) == 3
+
+        jacobian = tape.jacobian(res, weights)
+        expected = [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]
+        assert np.allclose(np.mean(jacobian, axis=0), expected, atol=0.1, rtol=0)
+
+    def test_multiple_gradient_integration(self, tol):
+        """Test that temporarily setting the shots works
+        for gradient computations, even if the QNode has been re-evaluated
+        with a different number of shots in the meantime."""
+        dev = qml.device("default.qubit", wires=2, shots=None)
+        a, b = [0.543, -0.654]
+        weights = tf.Variable([a, b], dtype=tf.float64)
+
+        @qnode(dev, interface="tf", diff_method=qml.gradients.param_shift)
+        def circuit(weights):
+            qml.RY(weights[0], wires=0)
+            qml.RX(weights[1], wires=1)
+            qml.CNOT(wires=[0, 1])
+            return qml.expval(qml.PauliY(1))
+
+        with tf.GradientTape() as tape:
+            res1 = circuit(weights)
+
+        assert qml.math.shape(res1) == tuple()
+
+        res2 = circuit(weights, shots=[(1, 1000)])
+        assert qml.math.shape(res2) == (1000,)
+
+        grad = tape.gradient(res1, weights)
+        expected = [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]
+        assert np.allclose(grad, expected, atol=tol, rtol=0)
+
+    def test_update_diff_method(self, mocker, tol):
+        """Test that temporarily setting the shots updates the diff method"""
+        dev = qml.device("default.qubit", wires=2, shots=100)
+        weights = tf.Variable([0.543, -0.654], dtype=tf.float64)
+
+        spy = mocker.spy(qml, "execute")
+
+        @qnode(dev, interface="tf")
+        def circuit(weights):
+            qml.RY(weights[0], wires=0)
+            qml.RX(weights[1], wires=1)
+            qml.CNOT(wires=[0, 1])
+            return qml.expval(qml.PauliY(1))
+
+        # since we are using finite shots, parameter-shift will
+        # be chosen
+        assert circuit.gradient_fn is qml.gradients.param_shift
+
+        circuit(weights)
+        assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+
+        # if we set the shots to None, backprop can now be used
+        circuit(weights, shots=None)
+        assert spy.call_args[1]["gradient_fn"] == "backprop"
+
+        # original QNode settings are unaffected
+        assert circuit.gradient_fn is qml.gradients.param_shift
+        circuit(weights)
+        assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+
+
+class TestAdjoint:
+    """Specific integration tests for the adjoint method"""
+
+    def test_reuse_state(self, mocker):
+        """Tests that the TF interface reuses the device state for adjoint differentiation"""
+        dev = qml.device("default.qubit", wires=2)
+
+        @qnode(dev, diff_method="adjoint", interface="tf")
+        def circ(x):
+            qml.RX(x[0], wires=0)
+            qml.RY(x[1], wires=1)
+            qml.CNOT(wires=(0, 1))
+            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1))
+
+        spy = mocker.spy(dev, "adjoint_jacobian")
+
+        weights = tf.Variable([0.1, 0.2], dtype=tf.float64)
+        x, y = 1.0 * weights
+
+        with tf.GradientTape() as tape:
+            res = tf.reduce_sum(circ(weights))
+
+        grad = tape.gradient(res, weights)
+        expected_grad = [-tf.sin(x), tf.cos(y)]
+
+        assert np.allclose(grad, expected_grad)
+        assert circ.device.num_executions == 1
+        spy.assert_called_with(mocker.ANY, use_device_state=mocker.ANY)
+
+    def test_reuse_state_multiple_evals(self, mocker, tol):
+        """Tests that the TF interface reuses the device state for adjoint differentiation,
+        even where there are intermediate evaluations."""
+        dev = qml.device("default.qubit", wires=2)
+
+        x_val = 0.543
+        y_val = -0.654
+        x = tf.Variable(x_val,
dtype=tf.float64) + y = tf.Variable(y_val, dtype=tf.float64) + + @qnode(dev, diff_method="adjoint", interface="tf") + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)) + + spy = mocker.spy(dev, "adjoint_jacobian") + + with tf.GradientTape() as tape: + res1 = circuit(x, y) + + assert np.allclose(res1, np.cos(x_val), atol=tol, rtol=0) + + # intermediate evaluation with different values + res2 = circuit(tf.math.tan(x), tf.math.cosh(y)) + + # the adjoint method will continue to compute the correct derivative + grad = tape.gradient(res1, x) + assert np.allclose(grad, -np.sin(x_val), atol=tol, rtol=0) + assert dev.num_executions == 2 + spy.assert_called_with(mocker.ANY, use_device_state=mocker.ANY) + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestQubitIntegration: + """Tests that ensure various qubit circuits integrate correctly""" + + def test_probability_differentiation(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with multiple probs outputs""" + + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.probs(wires=[0]), qml.probs(wires=[1]) + + with tf.GradientTape() as tape: + res = circuit(x, y) + + expected = np.array( + [ + [tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2], + [(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = tape.jacobian(res, [x, y]) + expected = np.array( + [ + [ + [-tf.sin(x) / 2, tf.sin(x) / 2], + [-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2], + ], + [ + [0, 0], + [-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2], + ], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_ragged_differentiation(self, dev_name, diff_method, mode, tol): + """Tests correct output shape and evaluation for a tape + with prob and expval outputs""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not currently support returning probabilities") + + dev = qml.device(dev_name, wires=2) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="tf") + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.probs(wires=[1])] + + with tf.GradientTape() as tape: + res = circuit(x, y) + + expected = np.array( + [ + tf.cos(x), + (1 + tf.cos(x) * tf.cos(y)) / 2, + (1 - tf.cos(x) * tf.cos(y)) / 2, + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = tape.jacobian(res, [x, y]) + expected = np.array( + [ + [-tf.sin(x), -tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2], + [0, -tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + def test_sampling(self, dev_name, diff_method, mode): + """Test sampling works as expected""" + if mode == "forward": + pytest.skip("Sampling not possible with forward mode differentiation.") + + dev = qml.device(dev_name, wires=2, shots=10) + + @qnode(dev, 
diff_method=diff_method, mode=mode, interface="tf") + def circuit(): + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + return [qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))] + + with tf.GradientTape() as tape: + res = circuit() + + assert res.shape == (2, 10) + assert isinstance(res, tf.Tensor) + + def test_second_derivative(self, dev_name, diff_method, mode, tol): + """Test second derivative calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = tf.Variable([1.0, 2.0], dtype=tf.float64) + + with tf.GradientTape() as tape1: + with tf.GradientTape() as tape2: + res = circuit(x) + g = tape2.gradient(res, x) + res2 = tf.reduce_sum(g) + + g2 = tape1.gradient(res2, x) + a, b = x * 1.0 + + expected_res = tf.cos(a) * tf.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_g2 = [ + -tf.cos(a) * tf.cos(b) + tf.sin(a) * tf.sin(b), + tf.sin(a) * tf.sin(b) - tf.cos(a) * tf.cos(b), + ] + assert np.allclose(g2, expected_g2, atol=tol, rtol=0) + + def test_hessian(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = tf.Variable([1.0, 2.0], dtype=tf.float64) + + with tf.GradientTape() as tape1: + with tf.GradientTape() as tape2: + res = circuit(x) + g = tape2.gradient(res, x) + + hess = tape1.jacobian(g, x) + a, b = x * 1.0 + + expected_res = tf.cos(a) * tf.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_hess = [ + [-tf.cos(a) * tf.cos(b), tf.sin(a) * tf.sin(b)], + [tf.sin(a) * tf.sin(b), -tf.cos(a) * tf.cos(b)], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.probs(wires=0) + + x = tf.Variable([1.0, 2.0], dtype=tf.float64) + + with tf.GradientTape() as tape1: + with tf.GradientTape(persistent=True) as tape2: + res = circuit(x) + g = tape2.jacobian(res, x, experimental_use_pfor=False) + + hess = tape1.jacobian(g, x) + + a, b = x * 1.0 + + expected_res = [ + 0.5 + 0.5 * tf.cos(a) * tf.cos(b), + 0.5 - 0.5 * tf.cos(a) * tf.cos(b), + ] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [ + [-0.5 * tf.sin(a) * tf.cos(b), -0.5 * tf.cos(a) * tf.sin(b)], + [0.5 * tf.sin(a) * tf.cos(b), 0.5 * tf.cos(a) * 
tf.sin(b)], + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_hess = [ + [ + [-0.5 * tf.cos(a) * tf.cos(b), 0.5 * tf.sin(a) * tf.sin(b)], + [0.5 * tf.sin(a) * tf.sin(b), -0.5 * tf.cos(a) * tf.cos(b)], + ], + [ + [0.5 * tf.cos(a) * tf.cos(b), -0.5 * tf.sin(a) * tf.sin(b)], + [-0.5 * tf.sin(a) * tf.sin(b), 0.5 * tf.cos(a) * tf.cos(b)], + ], + ] + np.testing.assert_allclose(hess, expected_hess, atol=tol, rtol=0, verbose=True) + + def test_hessian_vector_valued_postprocessing(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode with post-processing""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.RX(x[0], wires=0) + qml.RY(x[1], wires=0) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(0))] + + x = tf.Variable([0.76, -0.87], dtype=tf.float64) + + with tf.GradientTape() as tape1: + with tf.GradientTape(persistent=True) as tape2: + res = tf.tensordot(x, circuit(x), axes=[0, 0]) + + g = tape2.jacobian(res, x, experimental_use_pfor=False) + + hess = tape1.jacobian(g, x) + a, b = x * 1.0 + + expected_res = a * tf.cos(a) * tf.cos(b) + b * tf.cos(a) * tf.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [ + tf.cos(b) * (tf.cos(a) - (a + b) * tf.sin(a)), + tf.cos(a) * (tf.cos(b) - (a + b) * tf.sin(b)), + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_hess = [ + [ + -(tf.cos(b) * ((a + b) * tf.cos(a) + 2 * tf.sin(a))), + -(tf.cos(b) * tf.sin(a)) + (-tf.cos(a) + (a + b) * tf.sin(a)) * tf.sin(b), + ], + [ + -(tf.cos(b) * tf.sin(a)) + (-tf.cos(a) + (a + b) * tf.sin(a)) * tf.sin(b), + -(tf.cos(a) * ((a + b) * tf.cos(b) + 2 * tf.sin(b))), + ], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_hessian_ragged(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a ragged QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + qml.RY(x[0], wires=1) + qml.RX(x[1], wires=1) + return qml.expval(qml.PauliZ(0)), qml.probs(wires=1) + + x = tf.Variable([1.0, 2.0], dtype=tf.float64) + res = circuit(x) + + with tf.GradientTape() as tape1: + with tf.GradientTape(persistent=True) as tape2: + res = circuit(x) + g = tape2.jacobian(res, x, experimental_use_pfor=False) + + hess = tape1.jacobian(g, x) + a, b = x * 1.0 + + expected_res = [ + tf.cos(a) * tf.cos(b), + 0.5 + 0.5 * tf.cos(a) * tf.cos(b), + 0.5 - 0.5 * tf.cos(a) * tf.cos(b), + ] + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [ + [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)], + [-0.5 * tf.sin(a) * tf.cos(b), -0.5 * tf.cos(a) * tf.sin(b)], + [0.5 * tf.sin(a) * tf.cos(b), 0.5 * tf.cos(a) * tf.sin(b)], + ] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_hess = [ + [ + [-tf.cos(a) * tf.cos(b), tf.sin(a) * tf.sin(b)], + [tf.sin(a) * tf.sin(b), -tf.cos(a) * tf.cos(b)], + ], + [ + [-0.5 * tf.cos(a) * tf.cos(b), 0.5 * tf.sin(a) * tf.sin(b)], + [0.5 * tf.sin(a) * tf.sin(b), -0.5 * tf.cos(a) * tf.cos(b)], + ], + [ + [0.5 * tf.cos(a) * tf.cos(b), -0.5 * tf.sin(a) * tf.sin(b)], + [-0.5 
* tf.sin(a) * tf.sin(b), 0.5 * tf.cos(a) * tf.cos(b)], + ], + ] + np.testing.assert_allclose(hess, expected_hess, atol=tol, rtol=0, verbose=True) + + def test_state(self, dev_name, diff_method, mode, tol): + """Test that the state can be returned and differentiated""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support states") + + dev = qml.device(dev_name, wires=2) + + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @qnode(dev, diff_method=diff_method, interface="tf", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.state() + + def cost_fn(x, y): + res = circuit(x, y) + assert res.dtype is tf.complex128 + probs = tf.math.abs(res) ** 2 + return probs[0] + probs[2] + + with tf.GradientTape() as tape: + res = cost_fn(x, y) + + if diff_method not in {"backprop"}: + pytest.skip("Test only supports backprop") + + grad = tape.gradient(res, [x, y]) + expected = [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2] + assert np.allclose(grad, expected, atol=tol, rtol=0) + + def test_projector(self, dev_name, diff_method, mode, tol): + """Test that the variance of a projector is correctly returned""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support projectors") + + dev = qml.device(dev_name, wires=2) + P = tf.constant([1]) + + x, y = 0.765, -0.654 + weights = tf.Variable([x, y], dtype=tf.float64) + + @qnode(dev, diff_method=diff_method, interface="tf", mode=mode) + def circuit(weights): + qml.RX(weights[0], wires=0) + qml.RY(weights[1], wires=1) + qml.CNOT(wires=[0, 1]) + return qml.var(qml.Projector(P, wires=0) @ qml.PauliX(1)) + + with tf.GradientTape() as tape: + res = circuit(weights) + + expected = 0.25 * np.sin(x / 2) ** 2 * (3 + np.cos(2 * y) + 2 * np.cos(x) * np.sin(y) ** 2) + assert np.allclose(res, expected, atol=tol, rtol=0) + + grad = tape.gradient(res, weights) + expected = [ + 0.5 * np.sin(x) * (np.cos(x / 2) ** 2 + np.cos(2 * y) * np.sin(x / 2) ** 2), + -2 * np.cos(y) * np.sin(x / 2) ** 4 * np.sin(y), + ] + assert np.allclose(grad, expected, atol=tol, rtol=0) + + +@pytest.mark.parametrize( + "diff_method,kwargs", + [["finite-diff", {}], ("parameter-shift", {}), ("parameter-shift", {"force_order2": True})], +) +class TestCV: + """Tests for CV integration""" + + def test_first_order_observable(self, diff_method, kwargs, tol): + """Test variance of a first order CV observable""" + dev = qml.device("default.gaussian", wires=1) + + r = tf.Variable(0.543, dtype=tf.float64) + phi = tf.Variable(-0.654, dtype=tf.float64) + + @qnode(dev, interface="tf", diff_method=diff_method, **kwargs) + def circuit(r, phi): + qml.Squeezing(r, 0, wires=0) + qml.Rotation(phi, wires=0) + return qml.var(qml.X(0)) + + with tf.GradientTape() as tape: + res = circuit(r, phi) + + expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2 + assert np.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + grad = tape.gradient(res, [r, phi]) + expected = [ + 2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2, + 2 * np.sinh(2 * r) * np.sin(2 * phi), + ] + assert np.allclose(grad, expected, atol=tol, rtol=0) + + def test_second_order_observable(self, diff_method, kwargs, tol): + """Test variance of a second order CV expectation value""" + dev = qml.device("default.gaussian", wires=1) + + n = tf.Variable(0.12, dtype=tf.float64) + a = tf.Variable(0.765, dtype=tf.float64) + + @qnode(dev, interface="tf", 
diff_method=diff_method, **kwargs) + def circuit(n, a): + qml.ThermalState(n, wires=0) + qml.Displacement(a, 0, wires=0) + return qml.var(qml.NumberOperator(0)) + + with tf.GradientTape() as tape: + res = circuit(n, a) + + expected = n ** 2 + n + np.abs(a) ** 2 * (1 + 2 * n) + assert np.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + grad = tape.gradient(res, [n, a]) + expected = [2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)] + assert np.allclose(grad, expected, atol=tol, rtol=0) diff --git a/tests/interfaces/test_batch_torch.py b/tests/interfaces/test_batch_torch.py index 2c9db85c427..ba0e0ca41f7 100644 --- a/tests/interfaces/test_batch_torch.py +++ b/tests/interfaces/test_batch_torch.py @@ -439,35 +439,6 @@ def test_jacobian(self, torch_device, execute_kwargs, tol): assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0) assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0) - def test_tape_no_parameters(self, torch_device, execute_kwargs, tol): - """Test that a tape with no parameters is correctly - ignored during the gradient computation""" - dev = qml.device("default.qubit", wires=1) - params = torch.tensor([0.1, 0.2], requires_grad=True, device=torch_device) - x, y = params.detach() - - with qml.tape.JacobianTape() as tape1: - qml.Hadamard(0) - qml.expval(qml.PauliX(0)) - - with qml.tape.JacobianTape() as tape2: - qml.RY(0.5, wires=0) - qml.expval(qml.PauliZ(0)) - - with qml.tape.JacobianTape() as tape3: - qml.RY(params[0], wires=0) - qml.RX(params[1], wires=0) - qml.expval(qml.PauliZ(0)) - - res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) - expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) - assert np.allclose(res.detach(), expected, atol=tol, rtol=0) - - res.backward() - grad = params.grad.detach() - expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] - assert np.allclose(grad, expected, atol=tol, rtol=0) - def test_reusing_quantum_tape(self, torch_device, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = torch.tensor(0.1, requires_grad=True, device=torch_device) @@ -789,27 +760,6 @@ def test_sampling(self, torch_device, execute_kwargs): assert res.shape == (2, 10) assert isinstance(res, torch.Tensor) - def test_sampling_expval(self, torch_device, execute_kwargs): - """Test sampling works as expected if combined with expectation values""" - if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": - pytest.skip("Adjoint differentiation does not support samples") - - dev = qml.device("default.qubit", wires=2, shots=10) - - with qml.tape.JacobianTape() as tape: - qml.Hadamard(wires=[0]) - qml.CNOT(wires=[0, 1]) - qml.sample(qml.PauliZ(0)) - qml.expval(qml.PauliX(1)) - - res = execute([tape], dev, **execute_kwargs)[0] - - assert len(res) == 2 - assert isinstance(res, tuple) - assert res[0].shape == (10,) - assert isinstance(res[0], torch.Tensor) - assert isinstance(res[1], torch.Tensor) - def test_sampling_gradient_error(self, torch_device, execute_kwargs): """Test differentiating a tape with sampling results in an error""" if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py new file mode 100644 index 00000000000..8b10f72b55b --- /dev/null +++ b/tests/interfaces/test_batch_torch_qnode.py @@ -0,0 +1,1099 @@ +# Copyright 2018-2020 Xanadu Quantum Technologies Inc. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Integration tests for using the Torch interface with a QNode""" +import pytest +import numpy as np + +torch = pytest.importorskip("torch", minversion="1.3") +from torch.autograd.functional import hessian, jacobian + +import pennylane as qml +from pennylane.new_qnode import qnode, QNode +from pennylane.tape import JacobianTape + + +qubit_device_and_diff_method = [ + ["default.qubit", "finite-diff", "backward"], + ["default.qubit", "parameter-shift", "backward"], + ["default.qubit", "backprop", "forward"], + ["default.qubit", "adjoint", "forward"], + ["default.qubit", "adjoint", "backward"], +] + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestQNode: + """Test that using the QNode with Torch integrates with the PennyLane stack""" + + def test_execution_with_interface(self, dev_name, diff_method, mode): + """Test execution works with the interface""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + a = torch.tensor(0.1, requires_grad=True) + res = circuit(a) + + assert circuit.interface == "torch" + + # with the interface, the tape returns torch tensors + + assert isinstance(res, torch.Tensor) + assert res.shape == tuple() + + # the tape is able to deduce trainable parameters + assert circuit.qtape.trainable_params == {0} + + # gradients should work + res.backward() + grad = a.grad + assert isinstance(grad, torch.Tensor) + assert grad.shape == tuple() + + def test_interface_swap(self, dev_name, diff_method, mode, tol): + """Test that the Torch interface can be applied to a QNode + with a pre-existing interface""" + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, interface="autograd") + def circuit(a): + qml.RY(a, wires=0) + qml.RX(0.2, wires=0) + return qml.expval(qml.PauliZ(0)) + + from pennylane import numpy as anp + + a = anp.array(0.1, requires_grad=True) + + res1 = circuit(a) + grad_fn = qml.grad(circuit) + grad1 = grad_fn(a) + + # switch to Torch interface + circuit.interface = "torch" + + a = torch.tensor(0.1, dtype=torch.float64, requires_grad=True) + + res2 = circuit(a) + res2.backward() + grad2 = a.grad + assert np.allclose(res1, res2.detach().numpy(), atol=tol, rtol=0) + assert np.allclose(grad1, grad2, atol=tol, rtol=0) + + def test_drawing(self, dev_name, diff_method, mode): + """Test circuit drawing when using the torch interface""" + + x = torch.tensor(0.1, requires_grad=True) + y = torch.tensor([0.2, 0.3], requires_grad=True) + z = torch.tensor(0.4, requires_grad=True) + + dev = qml.device("default.qubit", wires=2) + + @qnode(dev, interface="torch") + def circuit(p1, p2=y, **kwargs): + qml.RX(p1, wires=0) + qml.RY(p2[0] * p2[1], wires=1) + qml.RX(kwargs["p3"], wires=0) + qml.CNOT(wires=[0, 1]) + return 
qml.probs(wires=0), qml.var(qml.PauliZ(1)) + + circuit(p1=x, p3=z) + + result = qml.draw(circuit)(p1=x, p3=z) + expected = """\ + 0: ──RX(0.1)───RX(0.4)──╭C──┤ Probs + 1: ──RY(0.06)───────────╰X──┤ Var[Z] +""" + + assert result == expected + + def test_jacobian(self, dev_name, diff_method, mode, mocker, tol): + """Test jacobian calculation""" + if diff_method == "parameter-shift": + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + elif diff_method == "finite-diff": + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a_val = 0.1 + b_val = 0.2 + + a = torch.tensor(a_val, dtype=torch.float64, requires_grad=True) + b = torch.tensor(b_val, dtype=torch.float64, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + res = circuit(a, b) + + assert circuit.qtape.trainable_params == {0, 1} + + assert isinstance(res, torch.Tensor) + assert res.shape == (2,) + + expected = [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)] + assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0) + + loss = torch.sum(res) + + loss.backward() + expected = [ + -np.sin(a_val) + np.sin(a_val) * np.sin(b_val), + -np.cos(a_val) * np.cos(b_val), + ] + assert np.allclose(a.grad, expected[0], atol=tol, rtol=0) + assert np.allclose(b.grad, expected[1], atol=tol, rtol=0) + + if diff_method in ("parameter-shift", "finite-diff"): + spy.assert_called() + + @pytest.mark.xfail + def test_jacobian_dtype(self, dev_name, diff_method, mode, tol): + """Test calculating the jacobian with a different datatype""" + if diff_method == "backprop": + pytest.skip("Test does not support backprop") + + a = torch.tensor(0.1, dtype=torch.float32, requires_grad=True) + b = torch.tensor(0.2, dtype=torch.float32, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="torch", diff_method=diff_method) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))] + + res = circuit(a, b) + + assert circuit.interface == "torch" + assert circuit.qtape.trainable_params == {0, 1} + + assert isinstance(res, torch.Tensor) + assert res.shape == (2,) + assert res.dtype is torch.float32 + + loss = torch.sum(res) + loss.backward() + assert a.grad.dtype is torch.float32 + assert b.grad.dtype is torch.float32 + + def test_jacobian_options(self, dev_name, diff_method, mode, mocker, tol): + """Test setting jacobian options""" + if diff_method != "finite-diff": + pytest.skip("Test only works with finite-diff") + + spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") + + a = torch.tensor([0.1, 0.2], requires_grad=True) + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch", h=1e-8, approx_order=2) + def circuit(a): + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + res = circuit(a) + res.backward() + + for args in spy.call_args_list: + assert args[1]["approx_order"] == 2 + assert args[1]["h"] == 1e-8 + + def test_changing_trainability(self, dev_name, diff_method, mode, mocker, tol): + """Test that changing the trainability of parameters changes the + number of differentiation requests made""" + if diff_method != "parameter-shift": + pytest.skip("Test only supports parameter-shift") + + a_val = 
0.1 + b_val = 0.2 + + a = torch.tensor(a_val, dtype=torch.float64, requires_grad=True) + b = torch.tensor(b_val, dtype=torch.float64, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="torch", diff_method=diff_method) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) + + res = circuit(a, b) + + # the tape has reported both gate arguments as trainable + assert circuit.qtape.trainable_params == {0, 1} + + expected = [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)] + assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0) + + spy = mocker.spy(qml.gradients.param_shift, "transform_fn") + + loss = torch.sum(res) + loss.backward() + + expected = [ + -np.sin(a_val) + np.sin(a_val) * np.sin(b_val), + -np.cos(a_val) * np.cos(b_val), + ] + assert np.allclose([a.grad, b.grad], expected, atol=tol, rtol=0) + + # The parameter-shift rule has been called for each argument + assert len(spy.spy_return[0]) == 4 + + # make the second QNode argument a constant + a_val = 0.54 + b_val = 0.8 + + a = torch.tensor(a_val, dtype=torch.float64, requires_grad=True) + b = torch.tensor(b_val, dtype=torch.float64, requires_grad=False) + + res = circuit(a, b) + + # the tape has reported only the first argument as trainable + assert circuit.qtape.trainable_params == {0} + + expected = [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)] + assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0) + + spy.call_args_list = [] + loss = torch.sum(res) + loss.backward() + expected = -np.sin(a_val) + np.sin(a_val) * np.sin(b_val) + assert np.allclose(a.grad, expected, atol=tol, rtol=0) + + # the gradient transform has only been called once + assert len(spy.call_args_list) == 1 + + def test_classical_processing(self, dev_name, diff_method, mode, tol): + """Test classical processing within the quantum tape""" + a = torch.tensor(0.1, dtype=torch.float64, requires_grad=True) + b = torch.tensor(0.2, dtype=torch.float64, requires_grad=False) + c = torch.tensor(0.3, dtype=torch.float64, requires_grad=True) + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(a, b, c): + qml.RY(a * c, wires=0) + qml.RZ(b, wires=0) + qml.RX(c + c ** 2 + torch.sin(a), wires=0) + return qml.expval(qml.PauliZ(0)) + + res = circuit(a, b, c) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {0, 2} + assert circuit.qtape.get_parameters() == [a * c, c + c ** 2 + torch.sin(a)] + + res.backward() + + assert isinstance(a.grad, torch.Tensor) + assert b.grad is None + assert isinstance(c.grad, torch.Tensor) + + def test_no_trainable_parameters(self, dev_name, diff_method, mode, tol): + """Test evaluation and Jacobian if there are no trainable parameters""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=0) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) + + a = 0.1 + b = torch.tensor(0.2, dtype=torch.float64, requires_grad=False) + + res = circuit(a, b) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == set() + + assert res.shape == (2,) + assert isinstance(res, torch.Tensor) + + with pytest.raises( + RuntimeError, + match="element 0 of tensors does not require grad and does not have a grad_fn", + ): + res.backward() + + 
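+    # both a Torch tensor and a plain NumPy array should be accepted as a
+    # constant (non-trainable) unitary matrix, so both are parametrized below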
@pytest.mark.parametrize( + "U", + [ + torch.tensor([[0, 1], [1, 0]], requires_grad=False), + np.array([[0, 1], [1, 0]]), + ], + ) + def test_matrix_parameter(self, dev_name, diff_method, mode, U, tol): + """Test that the Torch interface works correctly + with a matrix parameter""" + a_val = 0.1 + a = torch.tensor(a_val, dtype=torch.float64, requires_grad=True) + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(U, a): + qml.QubitUnitary(U, wires=0) + qml.RY(a, wires=0) + return qml.expval(qml.PauliZ(0)) + + res = circuit(U, a) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1} + + assert np.allclose(res.detach(), -np.cos(a_val), atol=tol, rtol=0) + + res.backward() + assert np.allclose(a.grad, np.sin(a_val), atol=tol, rtol=0) + + @pytest.mark.xfail + def test_differentiable_expand(self, dev_name, diff_method, mode, tol): + """Test that operation and nested tapes expansion + is differentiable""" + + class U3(qml.U3): + def expand(self): + theta, phi, lam = self.data + wires = self.wires + + with JacobianTape() as tape: + qml.Rot(lam, theta, -lam, wires=wires) + qml.PhaseShift(phi + lam, wires=wires) + + return tape + + dev = qml.device(dev_name, wires=1) + a = np.array(0.1) + p_val = [0.1, 0.2, 0.3] + p = torch.tensor(p_val, dtype=torch.float64, requires_grad=True) + + @qnode(dev, diff_method=diff_method, mode=mode, interface="torch") + def circuit(a, p): + qml.RX(a, wires=0) + U3(p[0], p[1], p[2], wires=0) + return qml.expval(qml.PauliX(0)) + + res = circuit(a, p) + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. 
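+            # consequently, every gate parameter (including the non-trainable input a) remains marked trainable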
+ assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + + assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + + expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * ( + np.cos(p_val[2]) * np.sin(p_val[1]) + + np.cos(p_val[0]) * np.cos(p_val[1]) * np.sin(p_val[2]) + ) + assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0) + + res.backward() + expected = np.array( + [ + np.cos(p_val[1]) + * (np.cos(a) * np.cos(p_val[0]) - np.sin(a) * np.sin(p_val[0]) * np.sin(p_val[2])), + np.cos(p_val[1]) * np.cos(p_val[2]) * np.sin(a) + - np.sin(p_val[1]) + * (np.cos(a) * np.sin(p_val[0]) + np.cos(p_val[0]) * np.sin(a) * np.sin(p_val[2])), + np.sin(a) + * ( + np.cos(p_val[0]) * np.cos(p_val[1]) * np.cos(p_val[2]) + - np.sin(p_val[1]) * np.sin(p_val[2]) + ), + ] + ) + assert np.allclose(p.grad, expected, atol=tol, rtol=0) + + +class TestShotsIntegration: + """Test that the QNode correctly changes shot value, and + differentiates it.""" + + def test_changing_shots(self, mocker, tol): + """Test that changing shots works on execution""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = torch.tensor([0.543, -0.654], requires_grad=True, dtype=torch.float64) + + @qnode(dev, interface="torch", diff_method=qml.gradients.param_shift) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + spy = mocker.spy(dev, "sample") + + # execute with device default shots (None) + res = circuit(a, b) + assert torch.allclose(res, -torch.cos(a) * torch.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + # execute with shots=100 + res = circuit(a, b, shots=100) + spy.assert_called() + assert spy.spy_return.shape == (100,) + + # device state has been unaffected + assert dev.shots is None + spy = mocker.spy(dev, "sample") + res = circuit(a, b) + assert torch.allclose(res, -torch.cos(a) * torch.sin(b), atol=tol, rtol=0) + spy.assert_not_called() + + def test_gradient_integration(self, tol): + """Test that temporarily setting the shots works + for gradient computations""" + dev = qml.device("default.qubit", wires=2, shots=None) + a, b = torch.tensor([0.543, -0.654], requires_grad=True) + + @qnode(dev, interface="torch", diff_method=qml.gradients.param_shift) + def cost_fn(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliY(1)) + + res = jacobian(lambda a, b: cost_fn(a, b, shots=[10000, 10000, 10000]), (a, b)) + res = qml.math.transpose(torch.stack(res)) + assert dev.shots is None + assert len(res) == 3 + + expected = torch.tensor([torch.sin(a) * torch.sin(b), -torch.cos(a) * torch.cos(b)]) + assert torch.allclose(torch.mean(res, axis=0), expected, atol=0.1, rtol=0) + + def test_multiple_gradient_integration(self, tol): + """Test that temporarily setting the shots works + for gradient computations, even if the QNode has been re-evaluated + with a different number of shots in the meantime.""" + dev = qml.device("default.qubit", wires=2, shots=None) + weights = torch.tensor([0.543, -0.654], requires_grad=True) + a, b = weights + + @qnode(dev, interface="torch", diff_method=qml.gradients.param_shift) + def circuit(a, b): + qml.RY(a, wires=0) + qml.RX(b, wires=1) + qml.CNOT(wires=[0, 1]) + 
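+            # with the CNOT entangler, the expectation of PauliY(1) is -cos(a) * sin(b)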
+            return qml.expval(qml.PauliY(1))
+
+        res1 = circuit(*weights)
+        assert qml.math.shape(res1) == tuple()
+
+        res2 = circuit(*weights, shots=[(1, 1000)])
+        assert qml.math.shape(res2) == (1000,)
+
+        res1.backward()
+
+        expected = torch.tensor([torch.sin(a) * torch.sin(b), -torch.cos(a) * torch.cos(b)])
+        assert torch.allclose(weights.grad, expected, atol=tol, rtol=0)
+
+    def test_update_diff_method(self, mocker, tol):
+        """Test that temporarily setting the shots updates the diff method"""
+        dev = qml.device("default.qubit", wires=2, shots=100)
+        a, b = torch.tensor([0.543, -0.654], requires_grad=True)
+
+        spy = mocker.spy(qml, "execute")
+
+        @qnode(dev, interface="torch")
+        def cost_fn(a, b):
+            qml.RY(a, wires=0)
+            qml.RX(b, wires=1)
+            qml.CNOT(wires=[0, 1])
+            return qml.expval(qml.PauliY(1))
+
+        # since we are using finite shots, parameter-shift will
+        # be chosen
+        assert cost_fn.gradient_fn is qml.gradients.param_shift
+
+        cost_fn(a, b)
+        assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+
+        # if we set the shots to None, backprop can now be used
+        cost_fn(a, b, shots=None)
+        assert spy.call_args[1]["gradient_fn"] == "backprop"
+
+        # original QNode settings are unaffected
+        assert cost_fn.gradient_fn is qml.gradients.param_shift
+        cost_fn(a, b)
+        assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+
+
+class TestAdjoint:
+    """Specific integration tests for the adjoint method"""
+
+    def test_reuse_state(self, mocker):
+        """Tests that the Torch interface reuses the device state for adjoint differentiation"""
+        dev = qml.device("default.qubit", wires=2)
+
+        @qnode(dev, diff_method="adjoint", interface="torch")
+        def circ(x):
+            qml.RX(x[0], wires=0)
+            qml.RY(x[1], wires=1)
+            qml.CNOT(wires=(0, 1))
+            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1))
+
+        expected_grad = lambda x: torch.tensor([-torch.sin(x[0]), torch.cos(x[1])])
+
+        spy = mocker.spy(dev, "adjoint_jacobian")
+
+        x1 = torch.tensor([0.1, 0.2], requires_grad=True)
+        res1 = circ(x1)
+        res1.backward(torch.Tensor([1, 1]))
+
+        assert np.allclose(x1.grad, expected_grad(x1))
+        assert circ.device.num_executions == 1
+        spy.assert_called_with(mocker.ANY, use_device_state=mocker.ANY)
+
+    def test_reuse_state_multiple_evals(self, mocker, tol):
+        """Tests that the Torch interface reuses the device state for adjoint differentiation,
+        even where there are intermediate evaluations."""
+        dev = qml.device("default.qubit", wires=2)
+
+        x_val = 0.543
+        y_val = -0.654
+        x = torch.tensor(x_val, requires_grad=True)
+        y = torch.tensor(y_val, requires_grad=True)
+
+        @qnode(dev, diff_method="adjoint", interface="torch")
+        def circuit(x, y):
+            qml.RX(x, wires=[0])
+            qml.RY(y, wires=[1])
+            qml.CNOT(wires=[0, 1])
+            return qml.expval(qml.PauliZ(0))
+
+        spy = mocker.spy(dev, "adjoint_jacobian")
+
+        res1 = circuit(x, y)
+        assert np.allclose(res1.detach(), np.cos(x_val), atol=tol, rtol=0)
+
+        # intermediate evaluation with different values
+        res2 = circuit(torch.tan(x), torch.cosh(y))
+
+        # the adjoint method will continue to compute the correct derivative
+        res1.backward()
+        assert np.allclose(x.grad.detach(), -np.sin(x_val), atol=tol, rtol=0)
+        assert dev.num_executions == 2
+        spy.assert_called_with(mocker.ANY, use_device_state=mocker.ANY)
+
+
+@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method)
+class TestQubitIntegration:
+    """Tests that ensure various qubit circuits integrate correctly"""
+
+    def test_probability_differentiation(self, dev_name, diff_method, mode, tol):
+        """Tests correct output shape and evaluation for a tape
+        with multiple probs outputs"""
+
+        if diff_method == "adjoint":
+            pytest.skip("The adjoint method does not currently support returning probabilities")
+
+        dev = qml.device(dev_name, wires=2)
+        x_val = 0.543
+        y_val = -0.654
+        x = torch.tensor(x_val, requires_grad=True)
+        y = torch.tensor(y_val, requires_grad=True)
+
+        @qnode(dev, diff_method=diff_method, mode=mode, interface="torch")
+        def circuit(x, y):
+            qml.RX(x, wires=[0])
+            qml.RY(y, wires=[1])
+            qml.CNOT(wires=[0, 1])
+            return qml.probs(wires=[0]), qml.probs(wires=[1])
+
+        res = circuit(x, y)
+
+        expected = np.array(
+            [
+                [np.cos(x_val / 2) ** 2, np.sin(x_val / 2) ** 2],
+                [
+                    (1 + np.cos(x_val) * np.cos(y_val)) / 2,
+                    (1 - np.cos(x_val) * np.cos(y_val)) / 2,
+                ],
+            ]
+        )
+
+        if diff_method == "backprop":
+            # TODO: check why this differs from other interfaces
+            expected = expected.flatten()
+
+        assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)
+
+        loss = torch.sum(res)
+        loss.backward()
+        expected = np.array(
+            [
+                -np.sin(x_val) / 2
+                + np.sin(x_val) / 2
+                - np.sin(x_val) * np.cos(y_val) / 2
+                + np.cos(y_val) * np.sin(x_val) / 2,
+                -np.cos(x_val) * np.sin(y_val) / 2 + np.cos(x_val) * np.sin(y_val) / 2,
+            ]
+        )
+        assert np.allclose(x.grad, expected[0], atol=tol, rtol=0)
+        assert np.allclose(y.grad, expected[1], atol=tol, rtol=0)
+
+    def test_ragged_differentiation(self, dev_name, diff_method, mode, monkeypatch, tol):
+        """Tests correct output shape and evaluation for a tape
+        with prob and expval outputs"""
+        if diff_method == "adjoint":
+            pytest.skip("The adjoint method does not currently support returning probabilities")
+
+        dev = qml.device(dev_name, wires=2)
+        x_val = 0.543
+        y_val = -0.654
+        x = torch.tensor(x_val, requires_grad=True)
+        y = torch.tensor(y_val, requires_grad=True)
+
+        @qnode(dev, diff_method=diff_method, mode=mode, interface="torch")
+        def circuit(x, y):
+            qml.RX(x, wires=[0])
+            qml.RY(y, wires=[1])
+            qml.CNOT(wires=[0, 1])
+            return [qml.expval(qml.PauliZ(0)), qml.probs(wires=[1])]
+
+        res = circuit(x, y)
+
+        expected = np.array(
+            [
+                np.cos(x_val),
+                (1 + np.cos(x_val) * np.cos(y_val)) / 2,
+                (1 - np.cos(x_val) * np.cos(y_val)) / 2,
+            ]
+        )
+        assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)
+
+        loss = torch.sum(res)
+        loss.backward()
+        expected = np.array(
+            [
+                -np.sin(x_val)
+                + -np.sin(x_val) * np.cos(y_val) / 2
+                + np.cos(y_val) * np.sin(x_val) / 2,
+                -np.cos(x_val) * np.sin(y_val) / 2 + np.cos(x_val) * np.sin(y_val) / 2,
+            ]
+        )
+        assert np.allclose(x.grad, expected[0], atol=tol, rtol=0)
+        assert np.allclose(y.grad, expected[1], atol=tol, rtol=0)
+
+    def test_sampling(self, dev_name, diff_method, mode):
+        """Test sampling works as expected"""
+        if mode == "forward":
+            pytest.skip("Sampling not possible with forward mode differentiation.")
+
+        dev = qml.device(dev_name, wires=2, shots=10)
+
+        @qnode(dev, diff_method=diff_method, mode=mode, interface="torch")
+        def circuit():
+            qml.Hadamard(wires=[0])
+            qml.CNOT(wires=[0, 1])
+            return [qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))]
+
+        res = circuit()
+
+        assert res.shape == (2, 10)
+        assert isinstance(res, torch.Tensor)
+
+    def test_sampling_expval(self, dev_name, diff_method, mode):
+        """Test sampling works as expected if combined with expectation values"""
+        if mode == "forward":
+            pytest.skip("Sampling not possible with forward mode differentiation.")
+
+        dev = qml.device(dev_name, wires=2, shots=10)
+
+        @qnode(dev, diff_method=diff_method, mode=mode,
interface="torch") + def circuit(): + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + return qml.sample(qml.PauliZ(0)), qml.expval(qml.PauliX(1)) + + res = circuit() + + assert len(res) == 2 + assert isinstance(res, tuple) + assert res[0].shape == (10,) + assert isinstance(res[0], torch.Tensor) + assert isinstance(res[1], torch.Tensor) + + @pytest.mark.xfail + def test_chained_qnodes(self, dev_name, diff_method, mode): + """Test that the gradient of chained QNodes works without error""" + dev = qml.device(dev_name, wires=2) + + @qnode(dev, interface="torch", diff_method=diff_method, mode=mode) + def circuit1(weights): + qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) + + @qnode(dev, interface="torch", diff_method=diff_method, mode=mode) + def circuit2(data, weights): + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + return qml.expval(qml.PauliX(0)) + + def cost(weights): + w1, w2 = weights + c1 = circuit1(w1) + c2 = circuit2(c1, w2) + return np.sum(c2) ** 2 + + w1 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=3) + w2 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=4) + + w1 = torch.tensor(w1, requires_grad=True) + w2 = torch.tensor(w2, requires_grad=True) + + weights = [w1, w2] + + loss = cost(weights) + loss.backward() + + def test_hessian(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a scalar valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="torch") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = torch.tensor([1.0, 2.0], requires_grad=True) + res = circuit(x) + + res.backward() + g = x.grad + + hess = hessian(circuit, x) + a, b = x.detach().numpy() + + expected_res = np.cos(a) * np.cos(b) + assert np.allclose(res.detach(), expected_res, atol=tol, rtol=0) + + expected_g = [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)] + assert np.allclose(g.detach(), expected_g, atol=tol, rtol=0) + + expected_hess = [ + [-np.cos(a) * np.cos(b), np.sin(a) * np.sin(b)], + [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)], + ] + assert np.allclose(hess.detach(), expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="torch") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.probs(wires=0) + + x = torch.tensor([1.0, 2.0], requires_grad=True) + res = circuit(x) + jac_fn = lambda x: jacobian(circuit, x, create_graph=True) + + g = jac_fn(x) + hess = jacobian(jac_fn, x) + a, b = x.detach().numpy() + + expected_res = [ + 0.5 + 0.5 * np.cos(a) * np.cos(b), + 0.5 - 0.5 * np.cos(a) * np.cos(b), + ] + assert np.allclose(res.detach(), expected_res, atol=tol, rtol=0) + + expected_g = [ + [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)], + [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)], + ] + assert np.allclose(g.detach(), expected_g, atol=tol, rtol=0) + + expected_hess = [ + [ + 
[-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)], + [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)], + ], + [ + [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)], + ], + ] + assert np.allclose(hess.detach(), expected_hess, atol=tol, rtol=0) + + def test_hessian_ragged(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a ragged QNode""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=2) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="torch") + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + qml.RY(x[0], wires=1) + qml.RX(x[1], wires=1) + return qml.expval(qml.PauliZ(0)), qml.probs(wires=1) + + x = torch.tensor([1.0, 2.0], requires_grad=True) + res = circuit(x) + jac_fn = lambda x: jacobian(circuit, x, create_graph=True) + + g = jac_fn(x) + hess = jacobian(jac_fn, x) + a, b = x.detach().numpy() + + expected_res = [ + np.cos(a) * np.cos(b), + 0.5 + 0.5 * np.cos(a) * np.cos(b), + 0.5 - 0.5 * np.cos(a) * np.cos(b), + ] + assert np.allclose(res.detach(), expected_res, atol=tol, rtol=0) + + expected_g = [ + [-np.sin(a) * np.cos(b), -np.cos(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)], + [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)], + ] + assert np.allclose(g.detach(), expected_g, atol=tol, rtol=0) + + expected_hess = [ + [ + [-np.cos(a) * np.cos(b), np.sin(a) * np.sin(b)], + [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)], + ], + [ + [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)], + [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)], + ], + [ + [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * np.sin(b)], + [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)], + ], + ] + assert np.allclose(hess.detach(), expected_hess, atol=tol, rtol=0) + + def test_hessian_vector_valued_postprocessing(self, dev_name, diff_method, mode, tol): + """Test hessian calculation of a vector valued QNode with post-processing""" + if diff_method not in {"parameter-shift", "backprop"}: + pytest.skip("Test only supports parameter-shift or backprop") + + dev = qml.device(dev_name, wires=1) + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="torch") + def circuit(x): + qml.RX(x[0], wires=0) + qml.RY(x[1], wires=0) + return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(0))] + + x = torch.tensor([0.76, -0.87], requires_grad=True, dtype=torch.float64) + + def cost_fn(x): + return x @ circuit(x) + + a, b = x.detach().numpy() + + res = cost_fn(x) + expected_res = np.array([a, b]) @ [np.cos(a) * np.cos(b), np.cos(a) * np.cos(b)] + assert np.allclose(res.detach(), expected_res, atol=tol, rtol=0) + + res.backward() + + g = x.grad + expected_g = [ + np.cos(b) * (np.cos(a) - (a + b) * np.sin(a)), + np.cos(a) * (np.cos(b) - (a + b) * np.sin(b)), + ] + assert np.allclose(g.detach(), expected_g, atol=tol, rtol=0) + + hess = hessian(cost_fn, x) + expected_hess = [ + [ + -(np.cos(b) * ((a + b) * np.cos(a) + 2 * np.sin(a))), + -(np.cos(b) * np.sin(a)) + (-np.cos(a) + (a + b) * np.sin(a)) * np.sin(b), + ], + [ + -(np.cos(b) * np.sin(a)) + (-np.cos(a) + (a + b) * np.sin(a)) * np.sin(b), + -(np.cos(a) * ((a + b) * np.cos(b) + 2 * np.sin(b))), + ], + ] + + assert np.allclose(hess.detach(), expected_hess, atol=tol, rtol=0) + + def test_state(self, dev_name, diff_method, mode, 
tol): + """Test that the state can be returned and differentiated""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support states") + + dev = qml.device(dev_name, wires=2) + + x = torch.tensor(0.543, requires_grad=True) + y = torch.tensor(-0.654, requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="torch", mode=mode) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.state() + + def cost_fn(x, y): + res = circuit(x, y) + assert res.dtype is torch.complex128 + probs = torch.abs(res) ** 2 + return probs[0] + probs[2] + + res = cost_fn(x, y) + + if diff_method not in {"backprop"}: + pytest.skip("Test only supports backprop") + + res.backward() + res = torch.tensor([x.grad, y.grad]) + expected = torch.tensor( + [-torch.sin(x) * torch.cos(y) / 2, -torch.cos(x) * torch.sin(y) / 2] + ) + assert torch.allclose(res, expected, atol=tol, rtol=0) + + def test_projector(self, dev_name, diff_method, mode, tol): + """Test that the variance of a projector is correctly returned""" + if diff_method == "adjoint": + pytest.skip("Adjoint does not support projectors") + + dev = qml.device(dev_name, wires=2) + P = torch.tensor([1], requires_grad=False) + + x, y = 0.765, -0.654 + weights = torch.tensor([x, y], requires_grad=True) + + @qnode(dev, diff_method=diff_method, interface="torch", mode=mode) + def circuit(x, y): + qml.RX(x, wires=0) + qml.RY(y, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.var(qml.Projector(P, wires=0) @ qml.PauliX(1)) + + res = circuit(*weights) + expected = 0.25 * np.sin(x / 2) ** 2 * (3 + np.cos(2 * y) + 2 * np.cos(x) * np.sin(y) ** 2) + assert np.allclose(res.detach(), expected, atol=tol, rtol=0) + + res.backward() + expected = np.array( + [ + [ + 0.5 * np.sin(x) * (np.cos(x / 2) ** 2 + np.cos(2 * y) * np.sin(x / 2) ** 2), + -2 * np.cos(y) * np.sin(x / 2) ** 4 * np.sin(y), + ] + ] + ) + assert np.allclose(weights.grad.detach(), expected, atol=tol, rtol=0) + + +@pytest.mark.parametrize( + "diff_method,kwargs", + [["finite-diff", {}], ("parameter-shift", {}), ("parameter-shift", {"force_order2": True})], +) +class TestCV: + """Tests for CV integration""" + + def test_first_order_observable(self, diff_method, kwargs, tol): + """Test variance of a first order CV observable""" + dev = qml.device("default.gaussian", wires=1) + + r = torch.tensor(0.543, dtype=torch.float64, requires_grad=True) + phi = torch.tensor(-0.654, dtype=torch.float64, requires_grad=True) + + @qnode(dev, interface="torch", diff_method=diff_method, **kwargs) + def circuit(r, phi): + qml.Squeezing(r, 0, wires=0) + qml.Rotation(phi, wires=0) + return qml.var(qml.X(0)) + + res = circuit(r, phi) + expected = torch.exp(2 * r) * torch.sin(phi) ** 2 + torch.exp(-2 * r) * torch.cos(phi) ** 2 + assert torch.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + res.backward() + res = torch.tensor([r.grad, phi.grad]) + expected = torch.tensor( + [ + [ + 2 * torch.exp(2 * r) * torch.sin(phi) ** 2 + - 2 * torch.exp(-2 * r) * torch.cos(phi) ** 2, + 2 * torch.sinh(2 * r) * torch.sin(2 * phi), + ] + ] + ) + assert torch.allclose(res, expected, atol=tol, rtol=0) + + def test_second_order_observable(self, diff_method, kwargs, tol): + """Test variance of a second order CV expectation value""" + dev = qml.device("default.gaussian", wires=1) + + n = torch.tensor(0.12, dtype=torch.float64, requires_grad=True) + a = torch.tensor(0.765, dtype=torch.float64, requires_grad=True) + + @qnode(dev, interface="torch", diff_method=diff_method, 
**kwargs) + def circuit(n, a): + qml.ThermalState(n, wires=0) + qml.Displacement(a, 0, wires=0) + return qml.var(qml.NumberOperator(0)) + + res = circuit(n, a) + expected = n ** 2 + n + torch.abs(a) ** 2 * (1 + 2 * n) + assert torch.allclose(res, expected, atol=tol, rtol=0) + + # circuit jacobians + res.backward() + res = torch.tensor([n.grad, a.grad]) + expected = torch.tensor([[2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)]]) + assert torch.allclose(res, expected, atol=tol, rtol=0) diff --git a/tests/interfaces/test_qnode_autograd.py b/tests/interfaces/test_qnode_autograd.py index c4d81d054b8..df8f94e5173 100644 --- a/tests/interfaces/test_qnode_autograd.py +++ b/tests/interfaces/test_qnode_autograd.py @@ -423,10 +423,22 @@ def circuit(a, p): return qml.expval(qml.PauliX(0)) res = circuit(a, p) - assert circuit.qtape.trainable_params == {1, 2, 3, 4} + + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. + assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + # In backprop mode, all parameters are returned. + assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * ( np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2]) diff --git a/tests/interfaces/test_qnode_tf.py b/tests/interfaces/test_qnode_tf.py index ff728a49a6e..c674c3b5c43 100644 --- a/tests/interfaces/test_qnode_tf.py +++ b/tests/interfaces/test_qnode_tf.py @@ -447,9 +447,20 @@ def circuit(a, p): with tf.GradientTape() as tape: res = circuit(a, p) - assert circuit.qtape.trainable_params == {1, 2, 3, 4} + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. 
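+            # hence all five gate parameters, even the constant input a, stay marked as trainable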
+ assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * ( tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2]) diff --git a/tests/interfaces/test_qnode_torch.py b/tests/interfaces/test_qnode_torch.py index 7a0bc1ec917..f0ca8ce7a9f 100644 --- a/tests/interfaces/test_qnode_torch.py +++ b/tests/interfaces/test_qnode_torch.py @@ -458,9 +458,20 @@ def circuit(a, p): res = circuit(a, p) - assert circuit.qtape.trainable_params == {1, 2, 3, 4} + if diff_method == "finite-diff": + assert circuit.qtape.trainable_params == {1, 2, 3, 4} + elif diff_method == "backprop": + # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() + # is never called. As a result, JacobianTape.trainable_params is never set --- the ML + # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. + assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + + if diff_method == "finite-diff": + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) + elif diff_method == "backprop": + assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * ( np.cos(p_val[2]) * np.sin(p_val[1]) From 0129cb70e80544c752b92128287a779b4e44006a Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 15:06:16 +0800 Subject: [PATCH 18/52] revert --- tests/interfaces/test_batch_autograd.py | 32 +++++++++++++++ tests/interfaces/test_batch_tensorflow.py | 30 ++++++++++++++ tests/interfaces/test_batch_torch.py | 50 +++++++++++++++++++++++ tests/interfaces/test_qnode_autograd.py | 16 +------- tests/interfaces/test_qnode_tf.py | 15 +------ tests/interfaces/test_qnode_torch.py | 15 +------ 6 files changed, 118 insertions(+), 40 deletions(-) diff --git a/tests/interfaces/test_batch_autograd.py b/tests/interfaces/test_batch_autograd.py index 27f00b902c3..aca6ec91103 100644 --- a/tests/interfaces/test_batch_autograd.py +++ b/tests/interfaces/test_batch_autograd.py @@ -439,6 +439,38 @@ def cost(a, b, device): expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]] assert np.allclose(res, expected, atol=tol, rtol=0) + def test_tape_no_parameters(self, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + + def cost(params): + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(np.array(0.5, requires_grad=False), wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + return sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + + params = np.array([0.1, 0.2], requires_grad=True) + 
x, y = params + + res = cost(params) + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res, expected, atol=tol, rtol=0) + + grad = qml.grad(cost)(params) + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = np.array(0.1, requires_grad=True) diff --git a/tests/interfaces/test_batch_tensorflow.py b/tests/interfaces/test_batch_tensorflow.py index 9b2969f9b8e..948d0a858f2 100644 --- a/tests/interfaces/test_batch_tensorflow.py +++ b/tests/interfaces/test_batch_tensorflow.py @@ -367,6 +367,36 @@ def test_jacobian(self, execute_kwargs, tol): expected = [[-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)]] assert np.allclose(expected, [agrad, bgrad], atol=tol, rtol=0) + def test_tape_no_parameters(self, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + params = tf.Variable([0.1, 0.2], dtype=tf.float64) + x, y = 1.0 * params + + with tf.GradientTape() as t: + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(0.5, wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res, expected, atol=tol, rtol=0) + + grad = t.gradient(res, params) + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = tf.Variable(0.1, dtype=tf.float64) diff --git a/tests/interfaces/test_batch_torch.py b/tests/interfaces/test_batch_torch.py index ba0e0ca41f7..2c9db85c427 100644 --- a/tests/interfaces/test_batch_torch.py +++ b/tests/interfaces/test_batch_torch.py @@ -439,6 +439,35 @@ def test_jacobian(self, torch_device, execute_kwargs, tol): assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0) assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0) + def test_tape_no_parameters(self, torch_device, execute_kwargs, tol): + """Test that a tape with no parameters is correctly + ignored during the gradient computation""" + dev = qml.device("default.qubit", wires=1) + params = torch.tensor([0.1, 0.2], requires_grad=True, device=torch_device) + x, y = params.detach() + + with qml.tape.JacobianTape() as tape1: + qml.Hadamard(0) + qml.expval(qml.PauliX(0)) + + with qml.tape.JacobianTape() as tape2: + qml.RY(0.5, wires=0) + qml.expval(qml.PauliZ(0)) + + with qml.tape.JacobianTape() as tape3: + qml.RY(params[0], wires=0) + qml.RX(params[1], wires=0) + qml.expval(qml.PauliZ(0)) + + res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs)) + expected = 1 + np.cos(0.5) + np.cos(x) * np.cos(y) + assert np.allclose(res.detach(), expected, atol=tol, rtol=0) + + res.backward() + grad = params.grad.detach() + expected = [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)] + assert np.allclose(grad, expected, atol=tol, rtol=0) + def test_reusing_quantum_tape(self, torch_device, execute_kwargs, tol): """Test re-using a quantum tape by passing new parameters""" a = 
torch.tensor(0.1, requires_grad=True, device=torch_device) @@ -760,6 +789,27 @@ def test_sampling(self, torch_device, execute_kwargs): assert res.shape == (2, 10) assert isinstance(res, torch.Tensor) + def test_sampling_expval(self, torch_device, execute_kwargs): + """Test sampling works as expected if combined with expectation values""" + if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": + pytest.skip("Adjoint differentiation does not support samples") + + dev = qml.device("default.qubit", wires=2, shots=10) + + with qml.tape.JacobianTape() as tape: + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + qml.sample(qml.PauliZ(0)) + qml.expval(qml.PauliX(1)) + + res = execute([tape], dev, **execute_kwargs)[0] + + assert len(res) == 2 + assert isinstance(res, tuple) + assert res[0].shape == (10,) + assert isinstance(res[0], torch.Tensor) + assert isinstance(res[1], torch.Tensor) + def test_sampling_gradient_error(self, torch_device, execute_kwargs): """Test differentiating a tape with sampling results in an error""" if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward": diff --git a/tests/interfaces/test_qnode_autograd.py b/tests/interfaces/test_qnode_autograd.py index df8f94e5173..c4d81d054b8 100644 --- a/tests/interfaces/test_qnode_autograd.py +++ b/tests/interfaces/test_qnode_autograd.py @@ -423,22 +423,10 @@ def circuit(a, p): return qml.expval(qml.PauliX(0)) res = circuit(a, p) - - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - # In backprop mode, all parameters are returned. - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * ( np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2]) diff --git a/tests/interfaces/test_qnode_tf.py b/tests/interfaces/test_qnode_tf.py index c674c3b5c43..ff728a49a6e 100644 --- a/tests/interfaces/test_qnode_tf.py +++ b/tests/interfaces/test_qnode_tf.py @@ -447,20 +447,9 @@ def circuit(a, p): with tf.GradientTape() as tape: res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. 
- assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * ( tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2]) diff --git a/tests/interfaces/test_qnode_torch.py b/tests/interfaces/test_qnode_torch.py index f0ca8ce7a9f..7a0bc1ec917 100644 --- a/tests/interfaces/test_qnode_torch.py +++ b/tests/interfaces/test_qnode_torch.py @@ -458,20 +458,9 @@ def circuit(a, p): res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - + assert circuit.qtape.trainable_params == {1, 2, 3, 4} assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * ( np.cos(p_val[2]) * np.sin(p_val[1]) From a79c827761e83f0d0c526368484750254b68db02 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 15:08:20 +0800 Subject: [PATCH 19/52] fix --- pennylane/gradients/parameter_shift.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py index d7655420689..6fe6ad83443 100644 --- a/pennylane/gradients/parameter_shift.py +++ b/pennylane/gradients/parameter_shift.py @@ -331,8 +331,22 @@ def var_param_shift(tape, argnum, shift=np.pi / 2, gradient_recipes=None, f0=Non def processing_fn(results): # We need to expand the dimensions of the variance mask, # and convert it to be the same type as the results. 
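The mask construction that follows implements the variance parameter-shift identity: for Var[A] = <A^2> - <A>^2, the derivative is d Var[A] = d<A^2> - 2 <A> d<A>. The unshifted result ``f0`` supplies the <A> factor, while the boolean mask records which returned quantities are variances (and therefore need the correction term) rather than plain expectation values.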
-        mask = qml.math.convert_like(qml.math.reshape(var_mask, [-1, 1]), results[0])
-        f0 = qml.math.expand_dims(results[0], -1)
+        res = results[0]
+        ragged = getattr(results[0], "dtype", None) is np.dtype("object")
+
+        mask = []
+        for m, r in zip(var_mask, results[0]):
+            array_func = np.ones if m else np.zeros
+            shape = qml.math.shape(r)
+            shape = (1,) if shape == tuple() else shape
+            mask.append(array_func(shape, dtype=bool))
+
+        if ragged:
+            res = qml.math.hstack(res)
+            mask = qml.math.hstack(mask)
+
+        mask = qml.math.convert_like(qml.math.reshape(mask, [-1, 1]), res)
+        f0 = qml.math.expand_dims(res, -1)
 
         pdA = pdA_fn(results[1:tape_boundary])
         pdA2 = 0

From 59b9d7dbb0806d94c2cc76ce3a8b7e75e594ea19 Mon Sep 17 00:00:00 2001
From: Josh Izaac
Date: Mon, 13 Sep 2021 15:25:58 +0800
Subject: [PATCH 20/52] fix

---
 doc/code/qml_beta.rst                     |   4 +
 pennylane/beta/__init__.py                |   1 +
 pennylane/{new_qnode.py => beta/qnode.py} | 104 +++++++++++++++++-
 pennylane/transforms/batch_transform.py   |   2 +-
 .../test_beta_qnode.py}                   |   8 +-
 5 files changed, 113 insertions(+), 6 deletions(-)
 rename pennylane/{new_qnode.py => beta/qnode.py} (72%)
 rename tests/{test_new_qnode.py => beta/test_beta_qnode.py} (96%)

diff --git a/doc/code/qml_beta.rst b/doc/code/qml_beta.rst
index 0449d213748..2e23916876f 100644
--- a/doc/code/qml_beta.rst
+++ b/doc/code/qml_beta.rst
@@ -6,6 +6,10 @@ and features.
 
 .. currentmodule:: pennylane.beta
 
+.. automodapi:: pennylane.beta
+    :include-all-objects:
+    :no-inheritance-diagram:
+
 .. automodapi:: pennylane.beta.devices
     :include-all-objects:
     :no-inheritance-diagram:
diff --git a/pennylane/beta/__init__.py b/pennylane/beta/__init__.py
index ab03b010a91..aba358dd2d6 100644
--- a/pennylane/beta/__init__.py
+++ b/pennylane/beta/__init__.py
@@ -14,3 +14,4 @@
 """
 This module contains experimental, contributed, and beta code.
 """
+from .qnode import QNode, qnode
diff --git a/pennylane/new_qnode.py b/pennylane/beta/qnode.py
similarity index 72%
rename from pennylane/new_qnode.py
rename to pennylane/beta/qnode.py
index eb2adef7aeb..87590aae915 100644
--- a/pennylane/new_qnode.py
+++ b/pennylane/beta/qnode.py
@@ -26,7 +26,109 @@
 
 
 class QNode:
-    """New QNode"""
+    """Represents a quantum node in the hybrid computational graph.
+
+    A *quantum node* contains a :ref:`quantum function `
+    (corresponding to a :ref:`variational circuit `)
+    and the computational device it is executed on.
+
+    The QNode calls the quantum function to construct a :class:`~.QuantumTape` instance representing
+    the quantum circuit.
+
+    Args:
+        func (callable): a quantum function
+        device (~.Device): a PennyLane-compatible device
+        interface (str): The interface that will be used for classical backpropagation.
+            This affects the types of objects that can be passed to/returned from the QNode:
+
+            * ``"autograd"``: Allows autograd to backpropagate
+              through the QNode. The QNode accepts default Python types
+              (floats, ints, lists) as well as NumPy array arguments,
+              and returns NumPy arrays.
+
+            * ``"torch"``: Allows PyTorch to backpropagate
+              through the QNode. The QNode accepts and returns Torch tensors.
+
+            * ``"tf"``: Allows TensorFlow in eager mode to backpropagate
+              through the QNode. The QNode accepts and returns
+              TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.
+
+            * ``"jax"``: Allows JAX to backpropagate
+              through the QNode. The QNode accepts and returns
+              JAX ``DeviceArray`` objects.
+
+            * ``None``: The QNode accepts default Python types
+              (floats, ints, lists) as well as NumPy array arguments,
+              and returns NumPy arrays.
It does not connect to any + machine learning library automatically for backpropagation. + + diff_method (str or .gradient_transform): The method of differentiation to use in the created QNode. + Can either be a :class:`~.gradient_transform`, which includes all quantum gradient + transforms in the :mod:`qml.gradients <.gradients>` module, or a string. The following + strings are allowed: + + * ``"best"``: Best available method. Uses classical backpropagation or the + device directly to compute the gradient if supported, otherwise will use + the analytic parameter-shift rule where possible with finite-difference as a fallback. + + * ``"device"``: Queries the device directly for the gradient. + Only allowed on devices that provide their own gradient computation. + + * ``"backprop"``: Use classical backpropagation. Only allowed on simulator + devices that are classically end-to-end differentiable, for example + :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned + QNode can only be used with the machine-learning framework supported + by the device. + + * ``"adjoint"``: Uses an `adjoint method `__ that + reverses through the circuit after a forward pass by iteratively applying the inverse + (adjoint) gate. Only allowed on supported simulator devices such as + :class:`default.qubit <~.DefaultQubit>`. + + * ``"parameter-shift"``: Use the analytic parameter-shift + rule for all supported quantum operation arguments, with finite-difference + as a fallback. + + * ``"finite-diff"``: Uses numerical finite-differences for all quantum operation + arguments. + + * ``None``: QNode cannot be differentiated. Works the same as ``interface=None``. + + max_expansion (int): The number of times the internal circuit should be expanded when + executed on a device. Expansion occurs when an operation or measurement is not + supported, and results in a gate decomposition. If any operations in the decomposition + remain unsupported by the device, another expansion occurs. + mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). Only applies + if the device is queried for the gradient; gradient transform + functions available in ``qml.gradients`` are only supported on the backward + pass. + cache (bool or dict or Cache): Whether to cache evaluations. This can result in + a significant reduction in quantum evaluations during gradient computations. + If ``True``, a cache with corresponding ``cachesize`` is created for each batch + execution. If ``False``, no caching is used. You may also pass your own cache + to be used; this can be any object that implements the special methods + ``__getitem__()``, ``__setitem__()``, and ``__delitem__()``, such as a dictionary. + cachesize (int): The size of any auto-created caches. Only applies when ``cache=True``. + max_diff (int): If ``diff_method`` is a gradient transform, this option specifies + the maximum number of derivatives to support. Increasing this value allows + for higher order derivatives to be extracted, at the cost of additional + (classical) computational overhead during the backwards pass. + + Keyword Args: + Any additional keyword arguments provided are passed to the differentiation + method. Please refer to the :mod:`qml.gradients <.gradients>` module for details + on supported options for your chosen gradient transform. + + + **Example** + + >>> def circuit(x): + ... qml.RX(x, wires=0) + ... 
return qml.expval(qml.PauliZ(0))
+    >>> dev = qml.device("default.qubit", wires=1)
+    >>> qnode = qml.QNode(circuit, dev)
+    """
 
     def __init__(
diff --git a/pennylane/transforms/batch_transform.py b/pennylane/transforms/batch_transform.py
index d65e458492a..7d62d88040a 100644
--- a/pennylane/transforms/batch_transform.py
+++ b/pennylane/transforms/batch_transform.py
@@ -17,7 +17,7 @@
 import types
 
 import pennylane as qml
-from pennylane.new_qnode import QNode
+from pennylane.beta import QNode
 
 
 class batch_transform:
diff --git a/tests/test_new_qnode.py b/tests/beta/test_beta_qnode.py
similarity index 96%
rename from tests/test_new_qnode.py
rename to tests/beta/test_beta_qnode.py
index 638ab0a7326..ddd404d494d 100644
--- a/tests/test_new_qnode.py
+++ b/tests/beta/test_beta_qnode.py
@@ -18,7 +18,7 @@
 import pennylane as qml
 from pennylane import numpy as pnp
 
-from pennylane.new_qnode import qnode, QNode
+from pennylane.beta import qnode, QNode
 from pennylane.transforms import draw
 from pennylane.tape import JacobianTape
 
@@ -228,13 +228,13 @@ def test_diff_method(self, mocker):
         diff method."""
         dev = qml.device("default.qubit", wires=1)
 
-        mock_best = mocker.patch("pennylane.new_qnode.QNode.get_best_method")
+        mock_best = mocker.patch("pennylane.beta.QNode.get_best_method")
         mock_best.return_value = ("best", {}, dev)
 
-        mock_backprop = mocker.patch("pennylane.new_qnode.QNode._validate_backprop_method")
+        mock_backprop = mocker.patch("pennylane.beta.QNode._validate_backprop_method")
         mock_backprop.return_value = ("backprop", {}, dev)
 
-        mock_device = mocker.patch("pennylane.new_qnode.QNode._validate_device_method")
+        mock_device = mocker.patch("pennylane.beta.QNode._validate_device_method")
         mock_device.return_value = ("device", {}, dev)
 
         qn = QNode(dummyfunc, dev, diff_method="best")

From 23f84334ec07148b9a5000d978fce70ceb5de28a Mon Sep 17 00:00:00 2001
From: Josh Izaac
Date: Mon, 13 Sep 2021 15:28:00 +0800
Subject: [PATCH 21/52] fix

---
 tests/interfaces/test_batch_autograd_qnode.py   | 2 +-
 tests/interfaces/test_batch_tensorflow_qnode.py | 2 +-
 tests/interfaces/test_batch_torch_qnode.py      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py
index 744c9f969c5..c75f2d2be9b 100644
--- a/tests/interfaces/test_batch_autograd_qnode.py
+++ b/tests/interfaces/test_batch_autograd_qnode.py
@@ -16,7 +16,7 @@
 from pennylane import numpy as np
 
 import pennylane as qml
-from pennylane.new_qnode import qnode, QNode
+from pennylane.beta import qnode, QNode
 from pennylane.tape import JacobianTape
 
 qubit_device_and_diff_method = [
diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py
index a23ca190bee..62db71219a1 100644
--- a/tests/interfaces/test_batch_tensorflow_qnode.py
+++ b/tests/interfaces/test_batch_tensorflow_qnode.py
@@ -18,7 +18,7 @@
 tf = pytest.importorskip("tensorflow")
 
 import pennylane as qml
-from pennylane.new_qnode import qnode, QNode
+from pennylane.beta import qnode, QNode
 from pennylane.tape import JacobianTape
 
 
diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py
index 8b10f72b55b..b4870d2cfbf 100644
--- a/tests/interfaces/test_batch_torch_qnode.py
+++ b/tests/interfaces/test_batch_torch_qnode.py
@@ -19,7 +19,7 @@
 from torch.autograd.functional import hessian, jacobian
 
 import pennylane as qml
-from pennylane.new_qnode import qnode, QNode
+from pennylane.beta import qnode, QNode
 from
pennylane.tape import JacobianTape From cd8b00c2d01b6d664fe3bea941585d571800d90f Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 17:09:19 +0800 Subject: [PATCH 22/52] fix cross references --- doc/introduction/interfaces.rst | 2 +- doc/introduction/interfaces/numpy.rst | 2 +- doc/introduction/interfaces/tf.rst | 2 +- doc/introduction/interfaces/torch.rst | 2 +- doc/introduction/templates.rst | 2 +- pennylane/beta/qnode.py | 14 ++++++++++++-- pennylane/gradients/__init__.py | 4 ++-- pennylane/gradients/finite_difference.py | 2 +- pennylane/gradients/parameter_shift.py | 2 +- pennylane/gradients/parameter_shift_cv.py | 2 +- .../templates/layers/simplified_two_design.py | 2 +- 11 files changed, 23 insertions(+), 13 deletions(-) diff --git a/doc/introduction/interfaces.rst b/doc/introduction/interfaces.rst index bbfded4f1a7..e25a3bc8cf4 100644 --- a/doc/introduction/interfaces.rst +++ b/doc/introduction/interfaces.rst @@ -100,7 +100,7 @@ Currently, there are four built-in interfaces: :doc:`NumPy `, interfaces make each of these libraries quantum-aware, allowing quantum circuits to be treated just like any other operation. -In PennyLane, an interface is declared when creating a :class:`~.QNode`, e.g., +In PennyLane, an interface is declared when creating a :class:`QNode `, e.g., .. code-block:: python diff --git a/doc/introduction/interfaces/numpy.rst b/doc/introduction/interfaces/numpy.rst index 64f5ef6f863..d8ae3b22218 100644 --- a/doc/introduction/interfaces/numpy.rst +++ b/doc/introduction/interfaces/numpy.rst @@ -3,7 +3,7 @@ NumPy interface =============== -.. note:: This interface is the default interface supported by PennyLane's :class:`~.QNode`. +.. note:: This interface is the default interface supported by PennyLane's :class:`QNode `. Using the NumPy interface diff --git a/doc/introduction/interfaces/tf.rst b/doc/introduction/interfaces/tf.rst index 9ea01ea92fc..c8bd996810c 100644 --- a/doc/introduction/interfaces/tf.rst +++ b/doc/introduction/interfaces/tf.rst @@ -30,7 +30,7 @@ Construction via keyword ------------------------ The :ref:`QNode decorator ` is the recommended way for creating -:class:`~.QNode` objects in PennyLane. The only change required to construct a TensorFlow-capable +:class:`QNode ` objects in PennyLane. The only change required to construct a TensorFlow-capable QNode is to specify the ``interface='tf'`` keyword argument: .. code-block:: python diff --git a/doc/introduction/interfaces/torch.rst b/doc/introduction/interfaces/torch.rst index 53cc25a76c3..dc130bce6f1 100644 --- a/doc/introduction/interfaces/torch.rst +++ b/doc/introduction/interfaces/torch.rst @@ -30,7 +30,7 @@ Construction via keyword ------------------------ The :ref:`QNode decorator ` is the recommended way for creating -:class:`~.QNode` objects in PennyLane. The only change required to construct a PyTorch-capable +:class:`QNode ` objects in PennyLane. The only change required to construct a PyTorch-capable QNode is to specify the ``interface='torch'`` keyword argument: .. code-block:: python diff --git a/doc/introduction/templates.rst b/doc/introduction/templates.rst index 364350c83b1..6e2ac1b72ce 100644 --- a/doc/introduction/templates.rst +++ b/doc/introduction/templates.rst @@ -14,7 +14,7 @@ literature, such architectures are commonly known as an *ansatz*. Templates are constructed out of **structured combinations** of the quantum operations provided by PennyLane. This means that **template functions can only be used within a - valid** :class:`~.QNode`. 
+    valid** :class:`QNode `.
 
 PennyLane conceptually distinguishes different types of templates, such as :ref:`Embeddings `,
 :ref:`Layers `, :ref:`State preparations ` and
diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py
index 87590aae915..fb97f9e1033 100644
--- a/pennylane/beta/qnode.py
+++ b/pennylane/beta/qnode.py
@@ -116,18 +116,28 @@ class QNode:
             (classical) computational overhead during the backwards pass.
 
     Keyword Args:
-        Any additional keyword arguments provided are passed to the differentiation
+        **kwargs: Any additional keyword arguments provided are passed to the differentiation
             method. Please refer to the :mod:`qml.gradients <.gradients>` module for details
             on supported options for your chosen gradient transform.
 
 
     **Example**
 
+    QNodes can be created by decorating a quantum function:
+
+    >>> dev = qml.device("default.qubit", wires=1)
+    ... @qml.beta.qnode(dev)
+    ... def circuit(x):
+    ...     qml.RX(x, wires=0)
+    ...     return qml.expval(qml.PauliZ(0))
+
+    or by instantiating the class directly:
+
     >>> def circuit(x):
     ...     qml.RX(x, wires=0)
     ...     return qml.expval(qml.PauliZ(0))
     >>> dev = qml.device("default.qubit", wires=1)
-    >>> qnode = qml.QNode(circuit, dev)
+    >>> qnode = qml.beta.QNode(circuit, dev)
     """
 
     def __init__(
diff --git a/pennylane/gradients/__init__.py b/pennylane/gradients/__init__.py
index 14d8e9a743e..af06bad1697 100644
--- a/pennylane/gradients/__init__.py
+++ b/pennylane/gradients/__init__.py
@@ -31,7 +31,7 @@
 
 - Transforming quantum circuits directly
 - Registering a quantum gradient strategy for use when performing autodifferentiation
-  with a :class:`~.QNode`.
+  with a :class:`QNode `.
 
 Overview
 --------
@@ -71,7 +71,7 @@
 -----------------------------------------
 
 All PennyLane QNodes are automatically differentiable, and can be included
-seamlessly within an autodiff pipeline. When creating a :class:`~.QNode`, the
+seamlessly within an autodiff pipeline. When creating a :class:`QNode `, the
 strategy for determining the optimal differentiation strategy is *automated*,
 and takes into account the circuit, device, autodiff framework, and metadata
 (such as whether a finite number of shots are used).
diff --git a/pennylane/gradients/finite_difference.py b/pennylane/gradients/finite_difference.py
index 3a0b5b2b966..200ab870305 100644
--- a/pennylane/gradients/finite_difference.py
+++ b/pennylane/gradients/finite_difference.py
@@ -237,7 +237,7 @@ def finite_diff(tape, argnum=None, h=1e-7, approx_order=1, n=1, strategy="forwar
 
     .. UsageDetails::
 
-        This gradient transform can also be applied directly to :class:`~.QNode` objects:
+        This gradient transform can also be applied directly to :class:`QNode ` objects:
 
         >>> @qml.qnode(dev)
         ... def circuit(params):
diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py
index d7655420689..b83a41a33f8 100644
--- a/pennylane/gradients/parameter_shift.py
+++ b/pennylane/gradients/parameter_shift.py
@@ -478,7 +478,7 @@ def param_shift(
 
     .. UsageDetails::
 
-        This gradient transform can be applied directly to :class:`~.QNode` objects:
+        This gradient transform can be applied directly to :class:`QNode ` objects:
 
         >>> @qml.qnode(dev)
         ... def circuit(params):
diff --git a/pennylane/gradients/parameter_shift_cv.py b/pennylane/gradients/parameter_shift_cv.py
index ec38d11a3b6..39b9fab0c46 100644
--- a/pennylane/gradients/parameter_shift_cv.py
+++ b/pennylane/gradients/parameter_shift_cv.py
@@ -595,7 +595,7 @@ def circuit(weights):
 
     ..
UsageDetails:: - This gradient transform can be applied directly to :class:`~.QNode` objects: + This gradient transform can be applied directly to :class:`QNode ` objects: >>> @qml.qnode(dev) ... def circuit(params): diff --git a/pennylane/templates/layers/simplified_two_design.py b/pennylane/templates/layers/simplified_two_design.py index 8ca767b2216..01558e652dd 100644 --- a/pennylane/templates/layers/simplified_two_design.py +++ b/pennylane/templates/layers/simplified_two_design.py @@ -60,7 +60,7 @@ class SimplifiedTwoDesign(Operation): .. UsageDetails:: - template - here shown for two layers - is used inside a :class:`~.QNode`: + template - here shown for two layers - is used inside a :class:`QNode `: .. code-block:: python From 21c0386f0323108efb0a2a131a045694c3a3ee90 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 17:19:25 +0800 Subject: [PATCH 23/52] fix --- pennylane/beta/qnode.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index fb97f9e1033..8bddc26b829 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -117,9 +117,8 @@ class QNode: Keyword Args: **kwargs: Any additional keyword arguments provided are passed to the differentiation - method. Please refer to the :mod:`qml.gradients <.gradients>` module for details - on supported options for your chosen gradient transform. - + method. Please refer to the :mod:`qml.gradients <.gradients>` module for details + on supported options for your chosen gradient transform. **Example** From 3d84bb302a06ecc0c26c33cf70539167b97e35d4 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 17:40:44 +0800 Subject: [PATCH 24/52] fix --- pennylane/beta/qnode.py | 3 ++- pennylane/gradients/finite_difference.py | 2 +- pennylane/gradients/parameter_shift.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 8bddc26b829..6d0e69a88ce 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -536,4 +536,5 @@ def __call__(self, *args, **kwargs): qnode = lambda dev, **kwargs: functools.partial(QNode, device=dev, **kwargs) -qnode = functools.update_wrapper(qnode, QNode) +qnode.__doc__ = QNode.__doc__ +qnode.__signature__ = inspect.signature(QNode) diff --git a/pennylane/gradients/finite_difference.py b/pennylane/gradients/finite_difference.py index 200ab870305..a0888f19417 100644 --- a/pennylane/gradients/finite_difference.py +++ b/pennylane/gradients/finite_difference.py @@ -187,7 +187,7 @@ def finite_diff(tape, argnum=None, h=1e-7, approx_order=1, n=1, strategy="forwar parameters with respect to its inputs. Args: - qnode (.QNode or .QuantumTape): quantum tape or QNode to differentiate + qnode (pennylane.QNode or .QuantumTape): quantum tape or QNode to differentiate argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivatives with respect to all trainable parameters are returned. diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py index b83a41a33f8..10c859206b1 100644 --- a/pennylane/gradients/parameter_shift.py +++ b/pennylane/gradients/parameter_shift.py @@ -371,7 +371,7 @@ def param_shift( parameters with respect to its inputs. 
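As a concrete instance of the rule being documented here, a gate whose generator has two eigenvalues (for example a Pauli rotation) admits the familiar two-term recipe

    d<A>/dtheta = ( <A>(theta + pi/2) - <A>(theta - pi/2) ) / 2,

while operations may supply more general (coefficient, multiplier, shift) triples through their ``grad_recipe`` attribute.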
Args: - qnode (.QNode or .QuantumTape): quantum tape or QNode to differentiate + qnode (pennylane.QNode or .QuantumTape): quantum tape or QNode to differentiate argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. From b4458701c2f3b216e702a3b588ea23e6e49ae3f9 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 13 Sep 2021 18:46:58 +0800 Subject: [PATCH 25/52] fix --- pennylane/transforms/classical_jacobian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pennylane/transforms/classical_jacobian.py b/pennylane/transforms/classical_jacobian.py index b19e808601a..c14999fd9d3 100644 --- a/pennylane/transforms/classical_jacobian.py +++ b/pennylane/transforms/classical_jacobian.py @@ -26,7 +26,7 @@ def classical_jacobian(qnode): arguments and the quantum gate arguments to be extracted. Args: - qnode (.QNode): QNode to compute the (classical) Jacobian of + qnode (pennylane.QNode): QNode to compute the (classical) Jacobian of Returns: function: Function which accepts the same arguments as the QNode. From 547a2863fc57fdb2207889c1ed4804974369134f Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 19:10:55 +0800 Subject: [PATCH 26/52] Add support for device and gradient expansions --- pennylane/_device.py | 93 ++++++++++++++++++- pennylane/_qubit_device.py | 2 +- pennylane/beta/qnode.py | 9 +- pennylane/gradients/gradient_transform.py | 24 +++-- pennylane/tape/tape.py | 3 + pennylane/templates/embeddings/angle.py | 1 + .../templates/layers/strongly_entangling.py | 1 + tests/interfaces/test_batch_autograd_qnode.py | 37 +++----- 8 files changed, 135 insertions(+), 35 deletions(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index f0285da060e..9e08b57f0bd 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -487,6 +487,8 @@ def batch_execute(self, circuits): # not start the next computation in the zero state self.reset() + circuit = self.expand_fn(circuit) + res = self.execute(circuit.operations, circuit.observables) results.append(res) @@ -527,6 +529,7 @@ def execute_and_gradients(self, circuits, method="jacobian", **kwargs): # Evaluations and gradients are paired, so that # devices can re-use the device state for the # gradient computation (if applicable). + circuit = self.expand_fn(circuit) res.append(self.batch_execute([circuit])[0]) jacs.append(gradient_method(circuit, **kwargs)) @@ -552,7 +555,95 @@ def gradients(self, circuits, method="jacobian", **kwargs): shape ``(output_shape, num_params)``. """ gradient_method = getattr(self, method) - return [gradient_method(circuit, **kwargs) for circuit in circuits] + return [gradient_method(self.expand_fn(circuit), **kwargs) for circuit in circuits] + + def expand_fn(self, circuit, max_expansion=10): + """Method for expanding or decomposing an input circuit. + This method should be overwritten if custom expansion logic is + required. + + By default, this method expands the tape if: + + - nested tapes are present, + - any operations are not supported on the device, or + - multiple observables are measured on the same wire. + + Args: + circuit (.QuantumTape): the circuit to expand. + max_expansion (int): The number of times the circuit should be + expanded. Expansion occurs when an operation or measurement is not + supported, and results in a gate decomposition. If any operations + in the decomposition remain unsupported by the device, another + expansion occurs. 
+
+        Returns:
+            .QuantumTape: The expanded/decomposed circuit, such that the device
+            will natively support all operations.
+        """
+        obs_on_same_wire = len(circuit._obs_sharing_wires) > 0
+        ops_not_supported = any(
+            isinstance(op, qml.tape.QuantumTape)  # nested tapes must be expanded
+            or not self.supports_operation(op.name)  # unsupported ops must be expanded
+            for op in circuit.operations
+        )
+
+        if ops_not_supported or obs_on_same_wire:
+            circuit = circuit.expand(
+                depth=max_expansion,
+                stop_at=lambda obj: not isinstance(obj, qml.tape.QuantumTape)
+                and self.supports_operation(obj.name),
+            )
+
+        return circuit
+
+    def batch_transform(self, circuit):
+        """Apply a differentiable batch transform for preprocessing a circuit
+        prior to execution. This method is called directly by the QNode, and
+        should be overwritten if the device requires a transform that
+        generates multiple circuits prior to execution.
+
+        By default, this method contains logic for generating multiple
+        circuits, one per term, of a circuit that terminates in ``expval(H)``,
+        if the underlying device does not support Hamiltonian expectation values,
+        or if the device requires finite shots.
+
+        .. warning::
+
+            This method will be tracked by autodifferentiation libraries,
+            such as Autograd, JAX, TensorFlow, and Torch. Please make sure
+            to use ``qml.math`` for autodiff-agnostic tensor processing
+            if required.
+
+        Args:
+            circuit (.QuantumTape): the circuit to preprocess
+
+        Returns:
+            tuple[Sequence[.QuantumTape], callable]: Returns a tuple containing
+            the sequence of circuits to be executed, and a post-processing function
+            to be applied to the list of evaluated circuit results.
+        """
+
+        # If the observable contains a Hamiltonian and the device does not
+        # support Hamiltonians, or if the simulation uses finite shots,
+        # split tape into multiple tapes of diagonalizable known observables.
+        # In future, this logic should be moved to the device
+        # to allow for more efficient batch execution.
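The fallback applied in the code that follows is ``qml.transforms.hamiltonian_expand``, which splits the measurement into one tape per Hamiltonian term and returns a function recombining the term results. A minimal sketch of the behaviour relied on here (the coefficients and observables are illustrative only):

    import pennylane as qml

    H = qml.Hamiltonian([0.5, 0.2], [qml.PauliZ(0), qml.PauliX(1)])

    with qml.tape.QuantumTape() as tape:
        qml.Hadamard(wires=0)
        qml.expval(H)

    tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
    # len(tapes) == 2, and fn recombines the per-term expectations:
    # fn([r1, r2]) == 0.5 * r1 + 0.2 * r2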
+ supports_hamiltonian = self.supports_observable("Hamiltonian") + finite_shots = self.shots is not None + + hamiltonian_in_obs = "Hamiltonian" in [obs.name for obs in circuit.observables] + + if hamiltonian_in_obs and (not supports_hamiltonian or finite_shots): + try: + return qml.transforms.hamiltonian_expand(circuit, group=False) + + except ValueError as e: + raise ValueError( + "Can only return the expectation of a single Hamiltonian observable" + ) from e + + # otherwise, return an identity transform + return [circuit], lambda res: res[0] @property def op_queue(self): diff --git a/pennylane/_qubit_device.py b/pennylane/_qubit_device.py index dad88462e5a..2d10b3a8112 100644 --- a/pennylane/_qubit_device.py +++ b/pennylane/_qubit_device.py @@ -275,7 +275,7 @@ def batch_execute(self, circuits): # not start the next computation in the zero state self.reset() - res = self.execute(circuit) + res = self.execute(self.expand_fn(circuit)) results.append(res) if self.tracker.active: diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 6d0e69a88ce..af1bf138211 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -511,15 +511,20 @@ def __call__(self, *args, **kwargs): # construct the tape self.construct(args, kwargs) + # preprocess the tapes by applying any device-specific transforms + tapes, processing_fn = self.device.batch_transform(self.tape) + res = qml.execute( - [self.tape], + tapes, device=self.device, gradient_fn=self.gradient_fn, interface=self.interface, gradient_kwargs=self.gradient_kwargs, override_shots=override_shots, **self.execute_kwargs, - )[0] + ) + + res = processing_fn(res) if override_shots is not False: # restore the initialization gradient function diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index 1846be0f5da..01f7ca7d0ec 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -19,7 +19,17 @@ unsupported_op = lambda op: op.grad_method is None supported_op = lambda op: op.grad_method is not None -trainable_op = lambda op: any(qml.math.requires_grad(p) for p in op.parameters) +trainable_op = lambda op: any(qml.math.requires_grad(p) for p in op.parameters) or getattr( + op, "trainable", False +) + + +# Define the stopping condition for the expansion +def stop_cond(obj): + if isinstance(obj, qml.measure.MeasurementProcess): + return True + + return (supported_op(obj) and trainable_op(obj)) or not trainable_op(obj) def gradient_expand(tape, depth=10): @@ -38,16 +48,14 @@ def gradient_expand(tape, depth=10): Returns: .QuantumTape: the expanded tape """ + requires_expansion = False # check if the tape contains unsupported trainable operations - if any(unsupported_op(op) and trainable_op(op) for op in tape.operations): - - # Define the stopping condition for the expansion - stop_cond = lambda obj: ( - not isinstance(obj, qml.measure.MeasurementProcess) - and ((supported_op(obj) and trainable_op(obj)) or not trainable_op(obj)) - ) + for idx in range(tape.num_params): + op = tape.get_operation(idx)[0] + requires_expansion = unsupported_op(op) + if requires_expansion: return tape.expand(depth=depth, stop_at=stop_cond) return tape diff --git a/pennylane/tape/tape.py b/pennylane/tape/tape.py index 66391c7f2e7..e6a231fc6da 100644 --- a/pennylane/tape/tape.py +++ b/pennylane/tape/tape.py @@ -496,6 +496,9 @@ def _update_trainable_params(self): """Set the trainable parameters""" self._trainable_params = set(self._par_info) + for idx in 
range(self.num_params): + self.get_operation(idx)[0].trainable = True + def _update(self): """Update all internal tape metadata regarding processed operations and observables""" self._graph = None diff --git a/pennylane/templates/embeddings/angle.py b/pennylane/templates/embeddings/angle.py index dd52043849e..abbcef74116 100644 --- a/pennylane/templates/embeddings/angle.py +++ b/pennylane/templates/embeddings/angle.py @@ -49,6 +49,7 @@ class AngleEmbedding(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, features, wires, rotation="X", do_queue=True, id=None): diff --git a/pennylane/templates/layers/strongly_entangling.py b/pennylane/templates/layers/strongly_entangling.py index 699f3f9d2e0..97225279b9c 100644 --- a/pennylane/templates/layers/strongly_entangling.py +++ b/pennylane/templates/layers/strongly_entangling.py @@ -67,6 +67,7 @@ class StronglyEntanglingLayers(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires, ranges=None, imprimitive=None, do_queue=True, id=None): diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index c75f2d2be9b..31fb5e5dd98 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -377,7 +377,6 @@ def circuit(data1): with pytest.raises(qml.numpy.NonDifferentiableError, match="is non-differentiable"): grad_fn(data1) - @pytest.mark.xfail def test_differentiable_expand(self, dev_name, diff_method, mode, tol): """Test that operation and nested tapes expansion is differentiable""" @@ -405,22 +404,6 @@ def circuit(a, p): res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - - assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - # In backprop mode, all parameters are returned. 
- assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) - expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * ( np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2]) ) @@ -685,20 +668,25 @@ def circuit(): assert res.shape == (2, 10) assert isinstance(res, np.ndarray) - @pytest.mark.xfail def test_chained_qnodes(self, dev_name, diff_method, mode): """Test that the gradient of chained QNodes works without error""" dev = qml.device(dev_name, wires=2) + class Template(qml.templates.StronglyEntanglingLayers): + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.templates.StronglyEntanglingLayers(*self.parameters, self.wires) + return tape + @qnode(dev, interface="autograd", diff_method=diff_method) def circuit1(weights): - qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + Template(weights, wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) @qnode(dev, interface="autograd", diff_method=diff_method) def circuit2(data, weights): qml.templates.AngleEmbedding(data, wires=[0, 1]) - qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1]) + Template(weights, wires=[0, 1]) return qml.expval(qml.PauliX(0)) def cost(weights): @@ -707,10 +695,13 @@ def cost(weights): c2 = circuit2(c1, w2) return np.sum(c2) ** 2 - w1 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=3) - w2 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=4) + w1 = qml.templates.StronglyEntanglingLayers.shape(n_wires=2, n_layers=3) + w2 = qml.templates.StronglyEntanglingLayers.shape(n_wires=2, n_layers=4) - weights = [w1, w2] + weights = [ + np.random.random(w1), + np.random.random(w2), + ] grad_fn = qml.grad(cost) res = grad_fn(weights) From 440a46ab7308a7c40d74bdccba27936795b98b3d Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 20:05:41 +0800 Subject: [PATCH 27/52] fixes --- pennylane/_device.py | 3 +-- pennylane/gradients/gradient_transform.py | 3 +++ pennylane/interfaces/batch/__init__.py | 3 +++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index 9e08b57f0bd..ac61a3db869 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -529,7 +529,6 @@ def execute_and_gradients(self, circuits, method="jacobian", **kwargs): # Evaluations and gradients are paired, so that # devices can re-use the device state for the # gradient computation (if applicable). - circuit = self.expand_fn(circuit) res.append(self.batch_execute([circuit])[0]) jacs.append(gradient_method(circuit, **kwargs)) @@ -555,7 +554,7 @@ def gradients(self, circuits, method="jacobian", **kwargs): shape ``(output_shape, num_params)``. """ gradient_method = getattr(self, method) - return [gradient_method(self.expand_fn(circuit), **kwargs) for circuit in circuits] + return [gradient_method(circuit, **kwargs) for circuit in circuits] def expand_fn(self, circuit, max_expansion=10): """Method for expanding or decomposing an input circuit. 
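The device-level expansion finalized by the hunk above can be sketched as follows; ``AngleEmbedding`` is used purely as an example of an operation that ``default.qubit`` does not implement natively, and the resulting gate names are indicative rather than guaranteed:

    import pennylane as qml

    dev = qml.device("default.qubit", wires=2)

    with qml.tape.QuantumTape() as tape:
        qml.templates.AngleEmbedding([0.1, 0.2], wires=[0, 1])
        qml.expval(qml.PauliZ(0))

    expanded = dev.expand_fn(tape)
    # the template decomposes into gates the device supports natively,
    # e.g. [op.name for op in expanded.operations] == ["RX", "RX"]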
diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index 01f7ca7d0ec..126abdca46a 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -55,6 +55,9 @@ def gradient_expand(tape, depth=10): op = tape.get_operation(idx)[0] requires_expansion = unsupported_op(op) + if requires_expansion: + break + if requires_expansion: return tape.expand(depth=depth, stop_at=stop_cond) diff --git a/pennylane/interfaces/batch/__init__.py b/pennylane/interfaces/batch/__init__.py index 6f4f1adb610..88c12e0a691 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -299,6 +299,9 @@ def cost_fn(params, x): if gradient_fn == "device": # gradient function is a device method + for i, tape in enumerate(tapes): + tapes[i] = device.expand_fn(tape) + if mode in ("forward", "best"): # replace the forward execution function to return # both results and gradients From d8d3b66df423c4ee783fb13c71667e0222244d68 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 20:11:02 +0800 Subject: [PATCH 28/52] more fixes --- pennylane/_device.py | 3 --- pennylane/_qubit_device.py | 3 +-- pennylane/interfaces/batch/__init__.py | 22 ++++++++++++++++++---- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index ac61a3db869..2d7588457b4 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -486,9 +486,6 @@ def batch_execute(self, circuits): # we need to reset the device here, else it will # not start the next computation in the zero state self.reset() - - circuit = self.expand_fn(circuit) - res = self.execute(circuit.operations, circuit.observables) results.append(res) diff --git a/pennylane/_qubit_device.py b/pennylane/_qubit_device.py index 2d10b3a8112..ee12c210a2b 100644 --- a/pennylane/_qubit_device.py +++ b/pennylane/_qubit_device.py @@ -274,8 +274,7 @@ def batch_execute(self, circuits): # we need to reset the device here, else it will # not start the next computation in the zero state self.reset() - - res = self.execute(self.expand_fn(circuit)) + res = self.execute(circuit) results.append(res) if self.tracker.active: diff --git a/pennylane/interfaces/batch/__init__.py b/pennylane/interfaces/batch/__init__.py index 88c12e0a691..6caeed01a62 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -69,7 +69,7 @@ def set_shots(device, shots): device.shots = original_shots -def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True): +def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True, expand_fn=None): """Decorator that adds caching to a function that executes multiple tapes on a device. 
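Conceptually, the caching layer that ``expand_fn`` is threaded into behaves like the toy version below; this is a simplified sketch only, as the real ``cache_execute`` also handles keyword-argument pass-through, tuple returns, and the uncached case:

    def simple_cache_execute(fn, cache):
        # results are keyed by the tape hash; only cache misses are executed
        def wrapper(tapes):
            missing = [t for t in tapes if t.hash not in cache]
            for tape, res in zip(missing, fn(missing)):
                cache[tape.hash] = res
            return [cache[t.hash] for t in tapes]
        return wrapper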
@@ -106,6 +106,12 @@ def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True): function: a wrapped version of the execution function ``fn`` with caching support """ + if expand_fn is not None: + original_fn = fn + + def fn(tapes, **kwargs): + tapes = [expand_fn(tape) for tape in tapes] + return original_fn(tapes, **kwargs) @wraps(fn) def wrapper(tapes, **kwargs): @@ -286,19 +292,27 @@ def cost_fn(params, x): if gradient_fn is None: with qml.tape.Unwrap(*tapes): - res = cache_execute(batch_execute, cache, return_tuple=False)(tapes) + res = cache_execute( + batch_execute, cache, return_tuple=False, expand_fn=device.expand_fn + )(tapes) return res if gradient_fn == "backprop" or interface is None: - return cache_execute(batch_execute, cache, return_tuple=False)(tapes) + return cache_execute(batch_execute, cache, return_tuple=False, expand_fn=device.expand_fn)( + tapes + ) # the default execution function is batch_execute - execute_fn = cache_execute(batch_execute, cache) + execute_fn = cache_execute(batch_execute, cache, expand_fn=device.expand_fn) if gradient_fn == "device": # gradient function is a device method + # Expand all tapes as per the device's expand function here. + # We must do this now, prior to the interface, to ensure that + # decompositions with parameter processing is tracked by the + # autodiff frameworks. for i, tape in enumerate(tapes): tapes[i] = device.expand_fn(tape) From 316a16f0f765102e9b850ab4d3a31b3be9eb9c36 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 20:26:47 +0800 Subject: [PATCH 29/52] more --- pennylane/_device.py | 1 + pennylane/_qubit_device.py | 1 + tests/interfaces/test_batch_autograd_qnode.py | 44 +++++++++++++++++++ 3 files changed, 46 insertions(+) diff --git a/pennylane/_device.py b/pennylane/_device.py index 2d7588457b4..84656c267c4 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -486,6 +486,7 @@ def batch_execute(self, circuits): # we need to reset the device here, else it will # not start the next computation in the zero state self.reset() + res = self.execute(circuit.operations, circuit.observables) results.append(res) diff --git a/pennylane/_qubit_device.py b/pennylane/_qubit_device.py index ee12c210a2b..dad88462e5a 100644 --- a/pennylane/_qubit_device.py +++ b/pennylane/_qubit_device.py @@ -274,6 +274,7 @@ def batch_execute(self, circuits): # we need to reset the device here, else it will # not start the next computation in the zero state self.reset() + res = self.execute(circuit) results.append(res) diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 31fb5e5dd98..1b7c2b90540 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1227,3 +1227,47 @@ def circ(x): assert circ.device.num_executions == 1 spy.assert_called_with(mocker.ANY, use_device_state=True) + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestTapeExpansion: + """Test that tape expansion within the QNode works correctly""" + + def test_device_expansion(self, dev_name, diff_method, mode, mocker): + """Test expansion of an unsupported operation on the device""" + dev = qml.device(dev_name, wires=1) + + class UnsupportedOp(qml.operation.Operation): + num_wires = 1 + num_params = 1 + par_domain = "R" + + grad_method = "A" + grad_recipe = ([[1, 1, 0.5]],) + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RX(self.data[0] ** 2, wires=self.wires) + return tape + 
+ @qnode(dev, diff_method=diff_method, mode=mode) + def circuit(x): + UnsupportedOp(x, wires=0) + return qml.expval(qml.PauliZ(0)) + + if diff_method == "adjoint" and mode == "forward": + spy = mocker.spy(circuit.device, "execute_and_gradients") + else: + spy = mocker.spy(circuit.device, "batch_execute") + + x = np.array(0.6, requires_grad=True) + circuit(x) + + tape = spy.call_args[0][0][0] + assert len(tape.operations) == 1 + assert tape.operations[0].name == "RX" + assert np.allclose(tape.operations[0].parameters, x ** 2) + + res = qml.grad(circuit)(x) + print(res) + assert False \ No newline at end of file From 9b32238b335283107bd8f73b0b4c8afd25463573 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 21:38:39 +0800 Subject: [PATCH 30/52] add tests --- tests/interfaces/test_batch_autograd_qnode.py | 118 ++++++++++++++++-- 1 file changed, 110 insertions(+), 8 deletions(-) diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 1b7c2b90540..6d50e599d3e 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1242,12 +1242,9 @@ class UnsupportedOp(qml.operation.Operation): num_params = 1 par_domain = "R" - grad_method = "A" - grad_recipe = ([[1, 1, 0.5]],) - def expand(self): with qml.tape.QuantumTape() as tape: - qml.RX(self.data[0] ** 2, wires=self.wires) + qml.RX(3 * self.data[0], wires=self.wires) return tape @qnode(dev, diff_method=diff_method, mode=mode) @@ -1260,14 +1257,119 @@ def circuit(x): else: spy = mocker.spy(circuit.device, "batch_execute") - x = np.array(0.6, requires_grad=True) + x = np.array(0.5, requires_grad=True) circuit(x) tape = spy.call_args[0][0][0] assert len(tape.operations) == 1 assert tape.operations[0].name == "RX" - assert np.allclose(tape.operations[0].parameters, x ** 2) + assert np.allclose(tape.operations[0].parameters, 3 * x) + + def test_no_gradient_expansion(self, dev_name, diff_method, mode, mocker): + """Test that an unsupported operation with defined gradient recipe is + not expanded for both parameter-shift and finite-differences""" + dev = qml.device(dev_name, wires=1) + + class UnsupportedOp(qml.operation.Operation): + num_wires = 1 + num_params = 1 + par_domain = "R" + + grad_method = "A" + grad_recipe = ([[3 / 2, 1, np.pi / 6], [-3 / 2, 1, -np.pi / 6]],) + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RX(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2) + def circuit(x): + UnsupportedOp(x, wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array(0.5, requires_grad=True) + + if isinstance(circuit.gradient_fn, qml.gradients.gradient_transform): + # check that the gradient recipe was applied *prior* to + # device expansion + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + res = qml.grad(circuit)(x) + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 1 + assert input_tape.operations[0].name == "UnsupportedOp" + assert input_tape.operations[0].data[0] == x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 1 + assert shifted_tape1.operations[0].name == "UnsupportedOp" + + assert len(shifted_tape2.operations) == 1 + assert shifted_tape2.operations[0].name == "UnsupportedOp" + + else: + res = qml.grad(circuit)(x) + + assert np.allclose(res, -3 * np.sin(3 * x)) + + if diff_method in ("backprop", "parameter-shift"): + # check that second derivatives work + assert 
np.allclose(qml.grad(qml.grad(circuit))(x), -9 * np.cos(3 * x)) + + def test_gradient_expansion(self, dev_name, diff_method, mode, mocker): + """Test that a *supported* operation with no gradient recipe is + expanded for both parameter-shift and finite-differences, but not for execution.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2) + def circuit(x): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = np.array(0.5, requires_grad=True) + circuit(x) + tape = spy.call_args[0][0][0] + + # no expansion is done on the forward pass! + assert len(tape.operations) == 2 + assert tape.operations[1].name == "PhaseShift" + assert tape.operations[1].grad_method is None + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = qml.grad(circuit)(x) - print(res) - assert False \ No newline at end of file + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 2 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 2 + assert shifted_tape1.operations[1].name == "RY" + + assert len(shifted_tape2.operations) == 2 + assert shifted_tape2.operations[1].name == "RY" + + assert np.allclose(res, -np.sin(3 * x)) + + if diff_method == "parameter-shift": + # test second order derivatives + res = qml.grad(qml.grad(circuit))(x) + assert np.allclose(res, -3 * np.cos(3 * x)) From 4a94acf2b1a783cbfbdca205a8a50cbc077e9dbb Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 21:51:41 +0800 Subject: [PATCH 31/52] more --- pennylane/_device.py | 4 +++- tests/gradients/test_gradient_transform.py | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index 84656c267c4..d7e84e613fa 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -577,7 +577,9 @@ def expand_fn(self, circuit, max_expansion=10): .QuantumTape: The expanded/decomposed circuit, such that the device will support native """ - obs_on_same_wire = len(circuit._obs_sharing_wires) > 0 + obs_on_same_wire = len(circuit._obs_sharing_wires) > 0 and not self.supports_observable( + "Hamiltonian" + ) ops_not_supported = any( isinstance(op, qml.tape.QuantumTape) # nested tapes must be expanded or not self.supports_operation(op.name) # unsupported ops must be expanded diff --git a/tests/gradients/test_gradient_transform.py b/tests/gradients/test_gradient_transform.py index 5b84db167d1..88670944e07 100644 --- a/tests/gradients/test_gradient_transform.py +++ b/tests/gradients/test_gradient_transform.py @@ -81,6 +81,10 @@ class NonDiffPhaseShift(qml.PhaseShift): qml.CNOT(wires=[0, 1]) qml.expval(qml.PauliZ(0)) + params = tape.get_parameters(trainable_only=False) + tape.trainable_params = qml.math.get_trainable_indices(params) + assert tape.trainable_params == {1} + spy = mocker.spy(tape, "expand") new_tape = gradient_expand(tape) From 4bdf0922bdbe5af6f3dfc48e75a23ae788be2fa9 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 14 Sep 2021 22:56:04 +0800 Subject: [PATCH 32/52] more --- 
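Note on this patch: gradient_expand now recomputes the trainable parameter indices on
the tape it returns. Expansion can replace an operation's parameters with classically
processed ones (e.g. an operation expanding to RX(3 * x)), so the trainable indices of
the original tape no longer line up with the expanded tape's parameter list. A minimal
sketch of this bookkeeping, reusing the qml.math.get_trainable_indices pattern from the
tests in the previous patch (the values below are illustrative only, not part of the patch):

    import pennylane as qml
    from pennylane import numpy as np

    with qml.tape.QuantumTape() as tape:
        # parameter 0 is marked trainable, parameter 1 is not
        qml.RX(np.array(0.5, requires_grad=True), wires=0)
        qml.RY(np.array(0.3, requires_grad=False), wires=0)
        qml.expval(qml.PauliZ(0))

    params = tape.get_parameters(trainable_only=False)
    tape.trainable_params = qml.math.get_trainable_indices(params)
    print(tape.trainable_params)  # expected: {0}
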
pennylane/gradients/gradient_transform.py | 9 ++-- pennylane/interfaces/batch/autograd.py | 3 ++ pennylane/tape/tape.py | 3 -- tests/interfaces/test_batch_autograd_qnode.py | 48 +++++++++++++++++++ 4 files changed, 56 insertions(+), 7 deletions(-) diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index 126abdca46a..59edf30ed2c 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -19,9 +19,7 @@ unsupported_op = lambda op: op.grad_method is None supported_op = lambda op: op.grad_method is not None -trainable_op = lambda op: any(qml.math.requires_grad(p) for p in op.parameters) or getattr( - op, "trainable", False -) +trainable_op = lambda op: any(qml.math.requires_grad(p) for p in op.parameters) # Define the stopping condition for the expansion @@ -59,7 +57,10 @@ def gradient_expand(tape, depth=10): break if requires_expansion: - return tape.expand(depth=depth, stop_at=stop_cond) + new_tape = tape.expand(depth=depth, stop_at=stop_cond) + params = new_tape.get_parameters(trainable_only=False) + new_tape.trainable_params = qml.math.get_trainable_indices(params) + return new_tape return tape diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index 97a56bb13e8..de7d4af30d0 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -174,6 +174,9 @@ def grad_fn(dy): # Generate and execute the required gradient tapes if _n == max_diff: + for i, tape in enumerate(tapes): + tapes[i] = gradient_fn.expand_fn(tape) + with qml.tape.Unwrap(*tapes): vjp_tapes, processing_fn = qml.gradients.batch_vjp( tapes, diff --git a/pennylane/tape/tape.py b/pennylane/tape/tape.py index e6a231fc6da..66391c7f2e7 100644 --- a/pennylane/tape/tape.py +++ b/pennylane/tape/tape.py @@ -496,9 +496,6 @@ def _update_trainable_params(self): """Set the trainable parameters""" self._trainable_params = set(self._par_info) - for idx in range(self.num_params): - self.get_operation(idx)[0].trainable = True - def _update(self): """Update all internal tape metadata regarding processed operations and observables""" self._graph = None diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 6d50e599d3e..7ab1344e3b4 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1373,3 +1373,51 @@ def circuit(x): # test second order derivatives res = qml.grad(qml.grad(circuit))(x) assert np.allclose(res, -3 * np.cos(3 * x)) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_gradient_expansion_trainable_only(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that a *supported* operation with no gradient recipe is only + expanded for parameter-shift and finite-differences when it is trainable.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff) + def circuit(x, y): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + PhaseShift(2 * y, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = np.array(0.5, requires_grad=True) + y = np.array(0.7, 
requires_grad=False) + circuit(x, y) + + # no expansion is done on the forward pass! + tape = spy.call_args[0][0][0] + assert len(tape.operations) == 3 + assert tape.operations[1].name == "PhaseShift" + assert tape.operations[1].grad_method is None + assert tape.operations[2].name == "PhaseShift" + assert tape.operations[2].grad_method is None + + # Expansion of only the first phase shift is done on the gradient pass + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + res = qml.grad(circuit)(x, y) + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 3 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + assert tape.operations[2].name == "PhaseShift" + assert tape.operations[2].grad_method is None From f0e5c3c0aec8a4f04c04e2a06b239bcefc760701 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 01:14:26 +0800 Subject: [PATCH 33/52] more --- pennylane/beta/qnode.py | 3 +++ pennylane/gradients/gradient_transform.py | 2 ++ pennylane/interfaces/batch/autograd.py | 3 --- tests/interfaces/test_batch_autograd_qnode.py | 22 ++++--------------- 4 files changed, 9 insertions(+), 21 deletions(-) diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index af1bf138211..b6142bbd6ee 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -489,6 +489,9 @@ def construct(self, args, kwargs): "Operator {} must act on all wires".format(obj.name) ) + if isinstance(self.gradient_fn, qml.gradients.gradient_transform): + self._tape = self.gradient_fn.expand_fn(self._tape) + def __call__(self, *args, **kwargs): override_shots = False diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index 59edf30ed2c..d6e72872705 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -174,6 +174,8 @@ def jacobian_wrapper(*args, **kwargs): if isinstance(cjac, tuple): # Classical processing of multiple arguments is present. Return qjac @ cjac. + print(cjac[0].shape, cjac[1].shape) + print(qjac.shape) jacs = [ qml.math.squeeze(qml.math.tensordot(c, qjac, [[0], [-1]])) for c in cjac diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index de7d4af30d0..97a56bb13e8 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -174,9 +174,6 @@ def grad_fn(dy): # Generate and execute the required gradient tapes if _n == max_diff: - for i, tape in enumerate(tapes): - tapes[i] = gradient_fn.expand_fn(tape) - with qml.tape.Unwrap(*tapes): vjp_tapes, processing_fn = qml.gradients.batch_vjp( tapes, diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 7ab1344e3b4..a34aa35a8ab 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1346,11 +1346,6 @@ def circuit(x): tape = spy.call_args[0][0][0] - # no expansion is done on the forward pass! 
- assert len(tape.operations) == 2 - assert tape.operations[1].name == "PhaseShift" - assert tape.operations[1].grad_method is None - spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = qml.grad(circuit)(x) @@ -1367,12 +1362,12 @@ def circuit(x): assert len(shifted_tape2.operations) == 2 assert shifted_tape2.operations[1].name == "RY" - assert np.allclose(res, -np.sin(3 * x)) + assert np.allclose(res, -3 * np.sin(3 * x)) if diff_method == "parameter-shift": # test second order derivatives res = qml.grad(qml.grad(circuit))(x) - assert np.allclose(res, -3 * np.cos(3 * x)) + assert np.allclose(res, -9 * np.cos(3 * x)) @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only(self, dev_name, diff_method, mode, max_diff, mocker): @@ -1403,15 +1398,6 @@ def circuit(x, y): y = np.array(0.7, requires_grad=False) circuit(x, y) - # no expansion is done on the forward pass! - tape = spy.call_args[0][0][0] - assert len(tape.operations) == 3 - assert tape.operations[1].name == "PhaseShift" - assert tape.operations[1].grad_method is None - assert tape.operations[2].name == "PhaseShift" - assert tape.operations[2].grad_method is None - - # Expansion of only the first phase shift is done on the gradient pass spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = qml.grad(circuit)(x, y) @@ -1419,5 +1405,5 @@ def circuit(x, y): assert len(input_tape.operations) == 3 assert input_tape.operations[1].name == "RY" assert input_tape.operations[1].data[0] == 3 * x - assert tape.operations[2].name == "PhaseShift" - assert tape.operations[2].grad_method is None + assert input_tape.operations[2].name == "PhaseShift" + assert input_tape.operations[2].grad_method is None From 4c671dcb69948df02116a4979eab74fbfd0316e1 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 01:21:02 +0800 Subject: [PATCH 34/52] more --- pennylane/gradients/gradient_transform.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index d6e72872705..b4a194fb85e 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -22,14 +22,6 @@ trainable_op = lambda op: any(qml.math.requires_grad(p) for p in op.parameters) -# Define the stopping condition for the expansion -def stop_cond(obj): - if isinstance(obj, qml.measure.MeasurementProcess): - return True - - return (supported_op(obj) and trainable_op(obj)) or not trainable_op(obj) - - def gradient_expand(tape, depth=10): """Expand out a tape so that it supports differentiation of requested operations. 
@@ -46,17 +38,16 @@ def gradient_expand(tape, depth=10): Returns: .QuantumTape: the expanded tape """ - requires_expansion = False # check if the tape contains unsupported trainable operations - for idx in range(tape.num_params): - op = tape.get_operation(idx)[0] - requires_expansion = unsupported_op(op) + if any(unsupported_op(op) and trainable_op(op) for op in tape.operations): - if requires_expansion: - break + # Define the stopping condition for the expansion + stop_cond = lambda obj: ( + not isinstance(obj, qml.measure.MeasurementProcess) + and ((supported_op(obj) and trainable_op(obj)) or not trainable_op(obj)) + ) - if requires_expansion: new_tape = tape.expand(depth=depth, stop_at=stop_cond) params = new_tape.get_parameters(trainable_only=False) new_tape.trainable_params = qml.math.get_trainable_indices(params) @@ -174,8 +165,6 @@ def jacobian_wrapper(*args, **kwargs): if isinstance(cjac, tuple): # Classical processing of multiple arguments is present. Return qjac @ cjac. - print(cjac[0].shape, cjac[1].shape) - print(qjac.shape) jacs = [ qml.math.squeeze(qml.math.tensordot(c, qjac, [[0], [-1]])) for c in cjac From 4308af5ac95254cc97b34d0fa6430cdaae2c6656 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 01:24:57 +0800 Subject: [PATCH 35/52] comment --- pennylane/beta/qnode.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index b6142bbd6ee..3ae60468429 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -489,6 +489,8 @@ def construct(self, args, kwargs): "Operator {} must act on all wires".format(obj.name) ) + # If the gradient function is a transform, expand the tape so that + # all operations are supported by the transform. if isinstance(self.gradient_fn, qml.gradients.gradient_transform): self._tape = self.gradient_fn.expand_fn(self._tape) From c35b5cbf40013992d9f56274a2b632116cf0badd Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 02:17:16 +0800 Subject: [PATCH 36/52] more tests --- pennylane/templates/embeddings/amplitude.py | 1 + .../templates/state_preparations/mottonen.py | 1 + .../interfaces/test_batch_tensorflow_qnode.py | 16 +--------------- tests/interfaces/test_batch_torch_qnode.py | 19 ++----------------- 4 files changed, 5 insertions(+), 32 deletions(-) diff --git a/pennylane/templates/embeddings/amplitude.py b/pennylane/templates/embeddings/amplitude.py index 307b4f55b4a..a332a06ca6c 100644 --- a/pennylane/templates/embeddings/amplitude.py +++ b/pennylane/templates/embeddings/amplitude.py @@ -124,6 +124,7 @@ def circuit(f=None): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__( self, features, wires, pad_with=None, normalize=False, pad=None, do_queue=True, id=None diff --git a/pennylane/templates/state_preparations/mottonen.py b/pennylane/templates/state_preparations/mottonen.py index ac320f50493..b65a4023006 100644 --- a/pennylane/templates/state_preparations/mottonen.py +++ b/pennylane/templates/state_preparations/mottonen.py @@ -248,6 +248,7 @@ class MottonenStatePreparation(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, state_vector, wires, do_queue=True, id=None): diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py index 62db71219a1..5e29cb0f3de 100644 --- a/tests/interfaces/test_batch_tensorflow_qnode.py +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -363,7 +363,6 @@ def circuit(U, a): res = 
tape.jacobian(res, a) assert np.allclose(res, tf.sin(a), atol=tol, rtol=0) - @pytest.mark.xfail def test_differentiable_expand(self, dev_name, diff_method, mode, tol): """Test that operation and nested tapes expansion is differentiable""" @@ -392,20 +391,7 @@ def circuit(a, p): with tf.GradientTape() as tape: res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. - assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - - assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert circuit.qtape.trainable_params == {1, 2, 3} expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * ( tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2]) diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py index b4870d2cfbf..fb1b6d70ecd 100644 --- a/tests/interfaces/test_batch_torch_qnode.py +++ b/tests/interfaces/test_batch_torch_qnode.py @@ -379,7 +379,6 @@ def circuit(U, a): res.backward() assert np.allclose(a.grad, np.sin(a_val), atol=tol, rtol=0) - @pytest.mark.xfail def test_differentiable_expand(self, dev_name, diff_method, mode, tol): """Test that operation and nested tapes expansion is differentiable""" @@ -408,20 +407,7 @@ def circuit(a, p): res = circuit(a, p) - if diff_method == "finite-diff": - assert circuit.qtape.trainable_params == {1, 2, 3, 4} - elif diff_method == "backprop": - # For a backprop device, no interface wrapping is performed, and JacobianTape.jacobian() - # is never called. As a result, JacobianTape.trainable_params is never set --- the ML - # framework uses its own backprop logic and its own bookkeeping re: trainable parameters. 
- assert circuit.qtape.trainable_params == {0, 1, 2, 3, 4} - - assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"] - - if diff_method == "finite-diff": - assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]]) - elif diff_method == "backprop": - assert np.all(circuit.qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]]) + assert circuit.qtape.trainable_params == {1, 2, 3} expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * ( np.cos(p_val[2]) * np.sin(p_val[1]) @@ -756,7 +742,6 @@ def circuit(): assert isinstance(res[0], torch.Tensor) assert isinstance(res[1], torch.Tensor) - @pytest.mark.xfail def test_chained_qnodes(self, dev_name, diff_method, mode): """Test that the gradient of chained QNodes works without error""" dev = qml.device(dev_name, wires=2) @@ -776,7 +761,7 @@ def cost(weights): w1, w2 = weights c1 = circuit1(w1) c2 = circuit2(c1, w2) - return np.sum(c2) ** 2 + return torch.sum(c2) ** 2 w1 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=3) w2 = qml.init.strong_ent_layers_normal(n_wires=2, n_layers=4) From 1e812d7dfdedc8ca7b8cd97dc7938d1e58eab61d Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 22:04:41 +0800 Subject: [PATCH 37/52] more tests --- pennylane/interfaces/batch/autograd.py | 2 +- pennylane/templates/layers/basic_entangler.py | 1 + tests/interfaces/test_batch_autograd_qnode.py | 112 ++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index 97a56bb13e8..75d9dfa6676 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -174,7 +174,7 @@ def grad_fn(dy): # Generate and execute the required gradient tapes if _n == max_diff: - with qml.tape.Unwrap(*tapes): + with qml.tape.Unwrap(*tapes, set_trainable=False): vjp_tapes, processing_fn = qml.gradients.batch_vjp( tapes, dy, diff --git a/pennylane/templates/layers/basic_entangler.py b/pennylane/templates/layers/basic_entangler.py index 6440b7f2c78..214f18b1e07 100644 --- a/pennylane/templates/layers/basic_entangler.py +++ b/pennylane/templates/layers/basic_entangler.py @@ -124,6 +124,7 @@ def circuit(weights): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires=None, rotation=None, do_queue=True, id=None): diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index a34aa35a8ab..27a6fbd5a5b 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1407,3 +1407,115 @@ def circuit(x, y): assert input_tape.operations[1].data[0] == 3 * x assert input_tape.operations[2].name == "PhaseShift" assert input_tape.operations[2].grad_method is None + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that the Hamiltonian is not expanded if there + are non-commuting groups and the number of shots is None""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not yet support Hamiltonians") + + dev = qml.device(dev_name, wires=3, shots=None) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff) + def circuit(data, weights, coeffs): + weights = 
weights.reshape(1, -1) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + return qml.expval(qml.Hamiltonian(coeffs, obs)) + + d = np.array([0.1, 0.2], requires_grad=False) + w = np.array([0.654, -0.734], requires_grad=True) + c = np.array([-0.6543, 0.24, 0.54], requires_grad=True) + + # test output + res = circuit(d, w, c) + expected = c[2] * np.cos(d[1] + w[1]) - c[1] * np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]) + assert np.allclose(res, expected) + spy.assert_not_called() + + # test gradients + grad = qml.grad(circuit)(d, w, c) + expected_w = [ + -c[1] * np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), + -c[1] * np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]) - c[2] * np.sin(d[1] + w[1]), + ] + expected_c = [0, -np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]), np.cos(d[1] + w[1])] + assert np.allclose(grad[0], expected_w) + assert np.allclose(grad[1], expected_c) + + # test second-order derivatives + if diff_method in ("parameter-shift", "backprop") and max_diff == 2: + + grad2_c = qml.jacobian(qml.grad(circuit, argnum=2), argnum=2)(d, w, c) + assert np.allclose(grad2_c, 0) + + grad2_w_c = qml.jacobian(qml.grad(circuit, argnum=1), argnum=2)(d, w, c) + expected = [0, -np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), 0], [ + 0, + -np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]), + -np.sin(d[1] + w[1]), + ] + assert np.allclose(grad2_w_c, expected) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_finite_shots( + self, dev_name, diff_method, mode, max_diff, mocker + ): + """Test that the Hamiltonian is expanded if there + are non-commuting groups and the number of shots is finite""" + if diff_method in ("adjoint", "backprop", "finite-diff"): + pytest.skip("The adjoint and backprop methods do not yet support sampling") + + dev = qml.device(dev_name, wires=3, shots=50000) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff) + def circuit(data, weights, coeffs): + # weights = weights.reshape(1, -1) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.RX(weights[0], wires=0) + qml.RX(weights[1], wires=1) + qml.CNOT(wires=[0, 1]) + + # qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + H = qml.Hamiltonian(coeffs, obs) + H.compute_grouping() + return qml.expval(H) + + d = np.array([0.1, 0.2], requires_grad=False) + w = np.array([0.654, -0.734], requires_grad=True) + c = np.array([-0.6543, 0.24, 0.54], requires_grad=True) + + # # test output + res = circuit(d, w, c) + expected = c[2] * np.cos(d[1] + w[1]) - c[1] * np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]) + assert np.allclose(res, expected, atol=0.1) + spy.assert_called() + + # test gradients + grad = qml.grad(circuit)(d, w, c) + expected_w = [ + -c[1] * np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), + -c[1] * np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]) - c[2] * np.sin(d[1] + w[1]), + ] + expected_c = [0, -np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]), np.cos(d[1] + w[1])] + assert np.allclose(grad[0], expected_w, atol=0.1) + assert np.allclose(grad[1], expected_c, atol=0.1) + + # test second-order derivatives + if diff_method == "parameter-shift" and max_diff == 2: + + grad2_c = qml.jacobian(qml.grad(circuit, argnum=2), argnum=2)(d, w, c) + assert np.allclose(grad2_c, 0, atol=0.1) + + grad2_w_c = qml.jacobian(qml.grad(circuit, argnum=1), argnum=2)(d, w, c) + expected = [0, -np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), 0], [ + 
0, + -np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]), + -np.sin(d[1] + w[1]), + ] + assert np.allclose(grad2_w_c, expected, atol=0.1) From a9b26cae8766fef55f790ec174229866ef2f1f94 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 23:39:30 +0800 Subject: [PATCH 38/52] more tests --- tests/beta/test_beta_qnode.py | 200 ++++++++++++++++ tests/interfaces/test_batch_autograd_qnode.py | 147 +----------- .../interfaces/test_batch_tensorflow_qnode.py | 218 +++++++++++++++++- 3 files changed, 421 insertions(+), 144 deletions(-) diff --git a/tests/beta/test_beta_qnode.py b/tests/beta/test_beta_qnode.py index ddd404d494d..c505eb9b201 100644 --- a/tests/beta/test_beta_qnode.py +++ b/tests/beta/test_beta_qnode.py @@ -898,3 +898,203 @@ def circuit(x, y): assert info["num_trainable_params"] == 4 else: assert info["device_name"] == "default.qubit.autograd" + + +class TestTapeExpansion: + """Test that tape expansion within the QNode works correctly""" + + @pytest.mark.parametrize( + "diff_method,mode", + [("parameter-shift", "backward"), ("adjoint", "forward"), ("adjoint", "backward")], + ) + def test_device_expansion(self, diff_method, mode, mocker): + """Test expansion of an unsupported operation on the device""" + dev = qml.device("default.qubit", wires=1) + + class UnsupportedOp(qml.operation.Operation): + num_wires = 1 + num_params = 1 + par_domain = "R" + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RX(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode) + def circuit(x): + UnsupportedOp(x, wires=0) + return qml.expval(qml.PauliZ(0)) + + if diff_method == "adjoint" and mode == "forward": + spy = mocker.spy(circuit.device, "execute_and_gradients") + else: + spy = mocker.spy(circuit.device, "batch_execute") + + x = np.array(0.5) + circuit(x) + + tape = spy.call_args[0][0][0] + assert len(tape.operations) == 1 + assert tape.operations[0].name == "RX" + assert np.allclose(tape.operations[0].parameters, 3 * x) + + def test_no_gradient_expansion(self, mocker): + """Test that an unsupported operation with defined gradient recipe is + not expanded for both parameter-shift and finite-differences""" + dev = qml.device("default.qubit", wires=1) + + class UnsupportedOp(qml.operation.Operation): + num_wires = 1 + num_params = 1 + par_domain = "R" + + grad_method = "A" + grad_recipe = ([[3 / 2, 1, np.pi / 6], [-3 / 2, 1, -np.pi / 6]],) + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RX(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method="parameter-shift", max_diff=2) + def circuit(x): + UnsupportedOp(x, wires=0) + return qml.expval(qml.PauliZ(0)) + + x = np.array(0.5) + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + qml.grad(circuit)(x) + + # check that the gradient recipe was applied *prior* to + # device expansion + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 1 + assert input_tape.operations[0].name == "UnsupportedOp" + assert input_tape.operations[0].data[0] == x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 1 + assert shifted_tape1.operations[0].name == "UnsupportedOp" + + assert len(shifted_tape2.operations) == 1 + assert shifted_tape2.operations[0].name == "UnsupportedOp" + + # check second derivative + assert np.allclose(qml.grad(qml.grad(circuit))(x), -9 * np.cos(3 * x)) + + def test_gradient_expansion(self, mocker): + """Test that a *supported* operation with no gradient recipe is + expanded for 
both parameter-shift and finite-differences, but not for execution.""" + dev = qml.device("default.qubit", wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method="parameter-shift", max_diff=2) + def circuit(x): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = np.array(0.5) + circuit(x) + + tape = spy.call_args[0][0][0] + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + res = qml.grad(circuit)(x) + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 2 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 2 + assert shifted_tape1.operations[1].name == "RY" + + assert len(shifted_tape2.operations) == 2 + assert shifted_tape2.operations[1].name == "RY" + + assert np.allclose(res, -3 * np.sin(3 * x)) + + # test second order derivatives + res = qml.grad(qml.grad(circuit))(x) + assert np.allclose(res, -9 * np.cos(3 * x)) + + def test_hamiltonian_expansion_analytic(self, mocker): + """Test that the Hamiltonian is not expanded if there + are non-commuting groups and the number of shots is None""" + dev = qml.device("default.qubit", wires=3, shots=None) + + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + c = np.array([-0.6543, 0.24, 0.54]) + H = qml.Hamiltonian(c, obs) + H.compute_grouping() + + assert len(H.grouping_indices) == 2 + + @qnode(dev) + def circuit(): + return qml.expval(H) + + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + res = circuit() + assert np.allclose(res, c[2], atol=0.1) + + spy.assert_not_called() + + def test_hamiltonian_expansion_finite_shots(self, mocker): + """Test that the Hamiltonian is expanded if there + are non-commuting groups and the number of shots is finite""" + dev = qml.device("default.qubit", wires=3, shots=50000) + + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + c = np.array([-0.6543, 0.24, 0.54]) + H = qml.Hamiltonian(c, obs) + H.compute_grouping() + + assert len(H.grouping_indices) == 2 + + @qnode(dev) + def circuit(): + return qml.expval(H) + + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + res = circuit() + assert np.allclose(res, c[2], atol=0.1) + + spy.assert_called() + tapes, fn = spy.spy_return + + assert len(tapes) == 2 + + def test_invalid_hamiltonian_expansion_finite_shots(self, mocker): + """Test that an error is raised if multiple expectations are requested + when using finite shots""" + dev = qml.device("default.qubit", wires=3, shots=50000) + + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + c = np.array([-0.6543, 0.24, 0.54]) + H = qml.Hamiltonian(c, obs) + H.compute_grouping() + + assert len(H.grouping_indices) == 2 + + @qnode(dev) + def circuit(): + return qml.expval(H), qml.expval(H) + + with pytest.raises( + ValueError, match="Can only return the expectation of a single Hamiltonian" + ): + circuit() diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 27a6fbd5a5b..adca96090ae 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1231,143 +1231,8 @@ def circ(x): 
@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) class TestTapeExpansion: - """Test that tape expansion within the QNode works correctly""" - - def test_device_expansion(self, dev_name, diff_method, mode, mocker): - """Test expansion of an unsupported operation on the device""" - dev = qml.device(dev_name, wires=1) - - class UnsupportedOp(qml.operation.Operation): - num_wires = 1 - num_params = 1 - par_domain = "R" - - def expand(self): - with qml.tape.QuantumTape() as tape: - qml.RX(3 * self.data[0], wires=self.wires) - return tape - - @qnode(dev, diff_method=diff_method, mode=mode) - def circuit(x): - UnsupportedOp(x, wires=0) - return qml.expval(qml.PauliZ(0)) - - if diff_method == "adjoint" and mode == "forward": - spy = mocker.spy(circuit.device, "execute_and_gradients") - else: - spy = mocker.spy(circuit.device, "batch_execute") - - x = np.array(0.5, requires_grad=True) - circuit(x) - - tape = spy.call_args[0][0][0] - assert len(tape.operations) == 1 - assert tape.operations[0].name == "RX" - assert np.allclose(tape.operations[0].parameters, 3 * x) - - def test_no_gradient_expansion(self, dev_name, diff_method, mode, mocker): - """Test that an unsupported operation with defined gradient recipe is - not expanded for both parameter-shift and finite-differences""" - dev = qml.device(dev_name, wires=1) - - class UnsupportedOp(qml.operation.Operation): - num_wires = 1 - num_params = 1 - par_domain = "R" - - grad_method = "A" - grad_recipe = ([[3 / 2, 1, np.pi / 6], [-3 / 2, 1, -np.pi / 6]],) - - def expand(self): - with qml.tape.QuantumTape() as tape: - qml.RX(3 * self.data[0], wires=self.wires) - return tape - - @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2) - def circuit(x): - UnsupportedOp(x, wires=0) - return qml.expval(qml.PauliZ(0)) - - x = np.array(0.5, requires_grad=True) - - if isinstance(circuit.gradient_fn, qml.gradients.gradient_transform): - # check that the gradient recipe was applied *prior* to - # device expansion - spy = mocker.spy(circuit.gradient_fn, "transform_fn") - res = qml.grad(circuit)(x) - - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 1 - assert input_tape.operations[0].name == "UnsupportedOp" - assert input_tape.operations[0].data[0] == x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 1 - assert shifted_tape1.operations[0].name == "UnsupportedOp" - - assert len(shifted_tape2.operations) == 1 - assert shifted_tape2.operations[0].name == "UnsupportedOp" - - else: - res = qml.grad(circuit)(x) - - assert np.allclose(res, -3 * np.sin(3 * x)) - - if diff_method in ("backprop", "parameter-shift"): - # check that second derivatives work - assert np.allclose(qml.grad(qml.grad(circuit))(x), -9 * np.cos(3 * x)) - - def test_gradient_expansion(self, dev_name, diff_method, mode, mocker): - """Test that a *supported* operation with no gradient recipe is - expanded for both parameter-shift and finite-differences, but not for execution.""" - if diff_method not in ("parameter-shift", "finite-diff"): - pytest.skip("Only supports gradient transforms") - - dev = qml.device(dev_name, wires=1) - - class PhaseShift(qml.PhaseShift): - grad_method = None - - def expand(self): - with qml.tape.QuantumTape() as tape: - qml.RY(3 * self.data[0], wires=self.wires) - return tape - - @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2) - def circuit(x): - qml.Hadamard(wires=0) - PhaseShift(x, wires=0) - return qml.expval(qml.PauliX(0)) - - spy = 
mocker.spy(circuit.device, "batch_execute") - x = np.array(0.5, requires_grad=True) - circuit(x) - - tape = spy.call_args[0][0][0] - - spy = mocker.spy(circuit.gradient_fn, "transform_fn") - res = qml.grad(circuit)(x) - - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 2 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 2 - assert shifted_tape1.operations[1].name == "RY" - - assert len(shifted_tape2.operations) == 2 - assert shifted_tape2.operations[1].name == "RY" - - assert np.allclose(res, -3 * np.sin(3 * x)) - - if diff_method == "parameter-shift": - # test second order derivatives - res = qml.grad(qml.grad(circuit))(x) - assert np.allclose(res, -9 * np.cos(3 * x)) + """Test that tape expansion within the QNode integrates correctly + with the Autograd interface""" @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only(self, dev_name, diff_method, mode, max_diff, mocker): @@ -1475,13 +1340,9 @@ def test_hamiltonian_expansion_finite_shots( @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff) def circuit(data, weights, coeffs): - # weights = weights.reshape(1, -1) + weights = weights.reshape(1, -1) qml.templates.AngleEmbedding(data, wires=[0, 1]) - qml.RX(weights[0], wires=0) - qml.RX(weights[1], wires=1) - qml.CNOT(wires=[0, 1]) - - # qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) H = qml.Hamiltonian(coeffs, obs) H.compute_grouping() return qml.expval(H) diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py index 5e29cb0f3de..414e68dd04c 100644 --- a/tests/interfaces/test_batch_tensorflow_qnode.py +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -88,7 +88,7 @@ def circuit(a): from pennylane import numpy as anp - a = anp.array(0.1, requires_grad=True) + a = anp.array(0.1, dtype=tf.float64) res1 = circuit(a) grad_fn = qml.grad(circuit) @@ -1041,3 +1041,219 @@ def circuit(n, a): grad = tape.gradient(res, [n, a]) expected = [2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)] assert np.allclose(grad, expected, atol=tol, rtol=0) + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestTapeExpansion: + """Test that tape expansion within the QNode integrates correctly + with the TF interface""" + + def test_gradient_expansion(self, dev_name, diff_method, mode, mocker): + """Test that a *supported* operation with no gradient recipe is + expanded for both parameter-shift and finite-differences, but not for execution.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="tf") + def circuit(x): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = tf.Variable(0.5, dtype=tf.float64) + + with tf.GradientTape() as t2: + with tf.GradientTape() as t1: + loss = circuit(x) + + tape = spy.call_args[0][0][0] + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + res = t1.gradient(loss, 
x) + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 2 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 2 + assert shifted_tape1.operations[1].name == "RY" + + assert len(shifted_tape2.operations) == 2 + assert shifted_tape2.operations[1].name == "RY" + + assert np.allclose(res, -3 * np.sin(3 * x)) + + if diff_method == "parameter-shift": + # test second order derivatives + res = t2.gradient(res, x) + assert np.allclose(res, -9 * np.cos(3 * x)) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_gradient_expansion_trainable_only(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that a *supported* operation with no gradient recipe is only + expanded for parameter-shift and finite-differences when it is trainable.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="tf") + def circuit(x, y): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + PhaseShift(2 * y, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = tf.Variable(0.5, dtype=tf.float64) + y = tf.constant(0.7, dtype=tf.float64) + + with tf.GradientTape() as t: + res = circuit(x, y) + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + res = t.gradient(res, [x, y]) + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 3 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + assert input_tape.operations[2].name == "PhaseShift" + assert input_tape.operations[2].grad_method is None + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that the Hamiltonian is not expanded if there + are non-commuting groups and the number of shots is None""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not yet support Hamiltonians") + + dev = qml.device(dev_name, wires=3, shots=None) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="tf") + def circuit(data, weights, coeffs): + weights = tf.reshape(weights, [1, -1]) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + return qml.expval(qml.Hamiltonian(coeffs, obs)) + + d = tf.constant([0.1, 0.2], dtype=tf.float64) + w = tf.Variable([0.654, -0.734], dtype=tf.float64) + c = tf.Variable([-0.6543, 0.24, 0.54], dtype=tf.float64) + + # test output + with tf.GradientTape(persistent=True) as t2: + with tf.GradientTape() as t1: + res = circuit(d, w, c) + + expected = c[2] * np.cos(d[1] + w[1]) - c[1] * np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]) + assert np.allclose(res, expected) + spy.assert_not_called() + + # test gradients + grad = t1.gradient(res, [d, w, c]) + + expected_w = [ + -c[1] * np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), + -c[1] * np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]) - c[2] * 
np.sin(d[1] + w[1]), + ] + expected_c = [0, -np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]), np.cos(d[1] + w[1])] + assert np.allclose(grad[1], expected_w) + assert np.allclose(grad[2], expected_c) + + # test second-order derivatives + if diff_method in ("parameter-shift", "backprop") and max_diff == 2: + + grad2_c = t2.jacobian(grad[2], c) + assert grad2_c is None or np.allclose(grad2_c, 0) + + grad2_w_c = t2.jacobian(grad[1], c) + expected = [0, -np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), 0], [ + 0, + -np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]), + -np.sin(d[1] + w[1]), + ] + assert np.allclose(grad2_w_c, expected) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_finite_shots( + self, dev_name, diff_method, mode, max_diff, mocker + ): + """Test that the Hamiltonian is expanded if there + are non-commuting groups and the number of shots is finite""" + if diff_method in ("adjoint", "backprop", "finite-diff"): + pytest.skip("The adjoint and backprop methods do not yet support sampling") + + dev = qml.device(dev_name, wires=3, shots=50000) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="tf") + def circuit(data, weights, coeffs): + weights = tf.reshape(weights, [1, -1]) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + H = qml.Hamiltonian(coeffs, obs) + H.compute_grouping() + return qml.expval(H) + + d = tf.constant([0.1, 0.2], dtype=tf.float64) + w = tf.Variable([0.654, -0.734], dtype=tf.float64) + c = tf.Variable([-0.6543, 0.24, 0.54], dtype=tf.float64) + + # # test output + with tf.GradientTape(persistent=True) as t2: + with tf.GradientTape() as t1: + res = circuit(d, w, c) + + expected = c[2] * np.cos(d[1] + w[1]) - c[1] * np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]) + assert np.allclose(res, expected, atol=0.1) + spy.assert_called() + + # test gradients + grad = t1.gradient(res, [d, w, c]) + + expected_w = [ + -c[1] * np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), + -c[1] * np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]) - c[2] * np.sin(d[1] + w[1]), + ] + expected_c = [0, -np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]), np.cos(d[1] + w[1])] + assert np.allclose(grad[1], expected_w, atol=0.1) + assert np.allclose(grad[2], expected_c, atol=0.1) + + # test second-order derivatives + if diff_method == "parameter-shift" and max_diff == 2: + grad2_c = t2.jacobian(grad[2], c) + assert grad2_c is None + + grad2_w_c = t2.jacobian(grad[1], c) + expected = [0, -np.cos(d[0] + w[0]) * np.sin(d[1] + w[1]), 0], [ + 0, + -np.cos(d[1] + w[1]) * np.sin(d[0] + w[0]), + -np.sin(d[1] + w[1]), + ] + assert np.allclose(grad2_w_c, expected, atol=0.1) From 063745b9bed2943432f87cd7085682cbc2bda811 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 15 Sep 2021 23:59:40 +0800 Subject: [PATCH 39/52] more tests --- pennylane/interfaces/batch/torch.py | 2 +- .../interfaces/test_batch_tensorflow_qnode.py | 2 +- tests/interfaces/test_batch_torch_qnode.py | 239 ++++++++++++++++++ 3 files changed, 241 insertions(+), 2 deletions(-) diff --git a/pennylane/interfaces/batch/torch.py b/pennylane/interfaces/batch/torch.py index b81d6d16c80..9d263d2ad5d 100644 --- a/pennylane/interfaces/batch/torch.py +++ b/pennylane/interfaces/batch/torch.py @@ -160,7 +160,7 @@ def backward(ctx, *dy): # The derivative order is at the maximum. 
Compute the VJP # in a non-differentiable manner to reduce overhead. - with qml.tape.Unwrap(*ctx.tapes): + with qml.tape.Unwrap(*ctx.tapes, set_trainable=False): vjp_tapes, processing_fn = qml.gradients.batch_vjp( ctx.tapes, dy, diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py index 414e68dd04c..54fb5b2f50f 100644 --- a/tests/interfaces/test_batch_tensorflow_qnode.py +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -88,7 +88,7 @@ def circuit(a): from pennylane import numpy as anp - a = anp.array(0.1, dtype=tf.float64) + a = anp.array(0.1, requires_grad=True) res1 = circuit(a) grad_fn = qml.grad(circuit) diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py index fb1b6d70ecd..f9586c75d13 100644 --- a/tests/interfaces/test_batch_torch_qnode.py +++ b/tests/interfaces/test_batch_torch_qnode.py @@ -1082,3 +1082,242 @@ def circuit(n, a): res = torch.tensor([n.grad, a.grad]) expected = torch.tensor([[2 * a ** 2 + 2 * n + 1, 2 * a * (2 * n + 1)]]) assert torch.allclose(res, expected, atol=tol, rtol=0) + + +@pytest.mark.parametrize("dev_name,diff_method,mode", qubit_device_and_diff_method) +class TestTapeExpansion: + """Test that tape expansion within the QNode integrates correctly + with the Torch interface""" + + def test_gradient_expansion(self, dev_name, diff_method, mode, mocker): + """Test that a *supported* operation with no gradient recipe is + expanded for both parameter-shift and finite-differences, but not for execution.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=2, interface="torch") + def circuit(x): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = torch.tensor(0.5, requires_grad=True) + + loss = circuit(x) + + tape = spy.call_args[0][0][0] + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + loss.backward() + res = x.grad + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 2 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + + shifted_tape1, shifted_tape2 = spy.spy_return[0] + + assert len(shifted_tape1.operations) == 2 + assert shifted_tape1.operations[1].name == "RY" + + assert len(shifted_tape2.operations) == 2 + assert shifted_tape2.operations[1].name == "RY" + + assert torch.allclose(res, -3 * torch.sin(3 * x)) + + if diff_method == "parameter-shift": + # test second order derivatives + res = torch.autograd.functional.hessian(circuit, x) + assert torch.allclose(res, -9 * torch.cos(3 * x)) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_gradient_expansion_trainable_only(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that a *supported* operation with no gradient recipe is only + expanded for parameter-shift and finite-differences when it is trainable.""" + if diff_method not in ("parameter-shift", "finite-diff"): + pytest.skip("Only supports gradient transforms") + + dev = qml.device(dev_name, wires=1) + + class PhaseShift(qml.PhaseShift): + grad_method = None + + def expand(self): + with qml.tape.QuantumTape() as tape: + 
qml.RY(3 * self.data[0], wires=self.wires) + return tape + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="torch") + def circuit(x, y): + qml.Hadamard(wires=0) + PhaseShift(x, wires=0) + PhaseShift(2 * y, wires=0) + return qml.expval(qml.PauliX(0)) + + spy = mocker.spy(circuit.device, "batch_execute") + x = torch.tensor(0.5, requires_grad=True) + y = torch.tensor(0.7, requires_grad=False) + + loss = circuit(x, y) + + spy = mocker.spy(circuit.gradient_fn, "transform_fn") + loss.backward() + + input_tape = spy.call_args[0][0] + assert len(input_tape.operations) == 3 + assert input_tape.operations[1].name == "RY" + assert input_tape.operations[1].data[0] == 3 * x + assert input_tape.operations[2].name == "PhaseShift" + assert input_tape.operations[2].grad_method is None + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): + """Test that the Hamiltonian is not expanded if there + are non-commuting groups and the number of shots is None""" + if diff_method == "adjoint": + pytest.skip("The adjoint method does not yet support Hamiltonians") + + dev = qml.device(dev_name, wires=3, shots=None) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="torch") + def circuit(data, weights, coeffs): + weights = torch.reshape(weights, [1, -1]) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + return qml.expval(qml.Hamiltonian(coeffs, obs)) + + d = torch.tensor([0.1, 0.2], requires_grad=False, dtype=torch.float64) + w = torch.tensor([0.654, -0.734], requires_grad=True, dtype=torch.float64) + c = torch.tensor([-0.6543, 0.24, 0.54], requires_grad=True, dtype=torch.float64) + + # test output + res = circuit(d, w, c) + + expected = c[2] * torch.cos(d[1] + w[1]) - c[1] * torch.sin(d[0] + w[0]) * torch.sin( + d[1] + w[1] + ) + assert torch.allclose(res, expected) + spy.assert_not_called() + + # test gradients + res.backward() + grad = (w.grad, c.grad) + + expected_w = torch.tensor( + [ + -c[1] * torch.cos(d[0] + w[0]) * torch.sin(d[1] + w[1]), + -c[1] * torch.cos(d[1] + w[1]) * torch.sin(d[0] + w[0]) + - c[2] * torch.sin(d[1] + w[1]), + ] + ) + expected_c = torch.tensor( + [0, -torch.sin(d[0] + w[0]) * torch.sin(d[1] + w[1]), torch.cos(d[1] + w[1])] + ) + assert torch.allclose(grad[0], expected_w) + assert torch.allclose(grad[1], expected_c) + + # test second-order derivatives + if diff_method in ("parameter-shift", "backprop") and max_diff == 2: + hessians = torch.autograd.functional.hessian(circuit, (d, w, c)) + + grad2_c = hessians[2][2] + assert torch.allclose(grad2_c, torch.zeros([3, 3], dtype=torch.float64)) + + grad2_w_c = hessians[1][2] + expected = torch.tensor( + [ + [0, -torch.cos(d[0] + w[0]) * torch.sin(d[1] + w[1]), 0], + [ + 0, + -torch.cos(d[1] + w[1]) * torch.sin(d[0] + w[0]), + -torch.sin(d[1] + w[1]), + ], + ] + ) + assert torch.allclose(grad2_w_c, expected) + + @pytest.mark.parametrize("max_diff", [1, 2]) + def test_hamiltonian_expansion_finite_shots( + self, dev_name, diff_method, mode, max_diff, mocker + ): + """Test that the Hamiltonian is expanded if there + are non-commuting groups and the number of shots is finite""" + if diff_method in ("adjoint", "backprop", "finite-diff"): + pytest.skip("The adjoint and backprop methods do not yet 
support sampling") + + dev = qml.device(dev_name, wires=3, shots=50000) + spy = mocker.spy(qml.transforms, "hamiltonian_expand") + obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliZ(0) @ qml.PauliZ(1)] + + @qnode(dev, diff_method=diff_method, mode=mode, max_diff=max_diff, interface="torch") + def circuit(data, weights, coeffs): + weights = torch.reshape(weights, [1, -1]) + qml.templates.AngleEmbedding(data, wires=[0, 1]) + qml.templates.BasicEntanglerLayers(weights, wires=[0, 1]) + H = qml.Hamiltonian(coeffs, obs) + H.compute_grouping() + return qml.expval(H) + + d = torch.tensor([0.1, 0.2], requires_grad=False, dtype=torch.float64) + w = torch.tensor([0.654, -0.734], requires_grad=True, dtype=torch.float64) + c = torch.tensor([-0.6543, 0.24, 0.54], requires_grad=True, dtype=torch.float64) + + # # test output + res = circuit(d, w, c) + + expected = c[2] * torch.cos(d[1] + w[1]) - c[1] * torch.sin(d[0] + w[0]) * torch.sin( + d[1] + w[1] + ) + assert torch.allclose(res, expected, atol=0.1) + spy.assert_called() + + # test gradients + res.backward() + grad = (w.grad, c.grad) + + expected_w = torch.tensor( + [ + -c[1] * torch.cos(d[0] + w[0]) * torch.sin(d[1] + w[1]), + -c[1] * torch.cos(d[1] + w[1]) * torch.sin(d[0] + w[0]) + - c[2] * torch.sin(d[1] + w[1]), + ] + ) + expected_c = torch.tensor( + [0, -torch.sin(d[0] + w[0]) * torch.sin(d[1] + w[1]), torch.cos(d[1] + w[1])] + ) + assert torch.allclose(grad[0], expected_w, atol=0.1) + assert torch.allclose(grad[1], expected_c, atol=0.1) + + # test second-order derivatives + if diff_method == "parameter-shift" and max_diff == 2: + hessians = torch.autograd.functional.hessian(circuit, (d, w, c)) + + grad2_c = hessians[2][2] + assert torch.allclose(grad2_c, torch.zeros([3, 3], dtype=torch.float64), atol=0.1) + + grad2_w_c = hessians[1][2] + expected = torch.tensor( + [ + [0, -torch.cos(d[0] + w[0]) * torch.sin(d[1] + w[1]), 0], + [ + 0, + -torch.cos(d[1] + w[1]) * torch.sin(d[0] + w[0]), + -torch.sin(d[1] + w[1]), + ], + ] + ) + assert torch.allclose(grad2_w_c, expected, atol=0.1) From 79b55d227ab8f1357bfb20d6927a8259a9bbeb29 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 17 Sep 2021 22:19:28 +0800 Subject: [PATCH 40/52] better suppotr --- pennylane/beta/qnode.py | 22 +++++++++++++++++++++ pennylane/interfaces/batch/__init__.py | 27 ++++++++++++++++++-------- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 3ae60468429..2f9db621ef0 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -94,6 +94,19 @@ class QNode: * ``None``: QNode cannot be differentiated. Works the same as ``interface=None``. + expansion_strategy (str): The strategy to use when circuit expansions or decompositions + are required. + + - ``gradient``: The QNode will attempt to decompose + the internal circuit such that all circuit operations are supported by the gradient + method. Further decompositions required for device execution are performed by the + device prior to circuit execution. + + - ``device``: The QNode will attempt to decompose the internal circuit + such that all circuit operations are natively supported by the device. + + The ``gradient`` strategy typically results in a reduction in quantum device evaluations + required during optimization, at the expense of an increase in classical pre-processing. max_expansion (int): The number of times the internal circuit should be expanded when executed on a device. 
Expansion occurs when an operation or measurement is not supported, and results in a gate decomposition. If any operations in the decomposition @@ -145,6 +158,7 @@ def __init__( device, interface="autograd", diff_method="best", + expansion_strategy="gradient", max_expansion=10, mode="best", cache=True, @@ -179,6 +193,7 @@ def __init__( self.device = device self._interface = interface self.diff_method = diff_method + self.expansion_strategy = expansion_strategy self.max_expansion = max_expansion # execution keyword arguments @@ -187,8 +202,12 @@ "cache": cache, "cachesize": cachesize, "max_diff": max_diff, + "max_expansion": max_expansion, } + if self.expansion_strategy == "device": + self.execute_kwargs["expand_fn"] = None + # internal data attributes self._tape = None self._qfunc_output = None @@ -489,6 +508,9 @@ def construct(self, args, kwargs): "Operator {} must act on all wires".format(obj.name) ) + if self.expansion_strategy == "device": + self._tape = self.device.expand_fn(self.tape, max_expansion=max_expansion) + # If the gradient function is a transform, expand the tape so that # all operations are supported by the transform. if isinstance(self.gradient_fn, qml.gradients.gradient_transform): diff --git a/pennylane/interfaces/batch/__init__.py index 6caeed01a62..075d7ce19e2 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -195,6 +195,8 @@ def execute( cachesize=10000, max_diff=2, override_shots=False, + expand_fn="device", + max_expansion=10, ): """Execute a batch of tapes on a device in an autodifferentiable-compatible manner. @@ -223,6 +225,14 @@ the maximum number of derivatives to support. Increasing this value allows for higher order derivatives to be extracted, at the cost of additional (classical) computational overhead during the backwards pass. + expand_fn (function): Tape expansion function to be called prior to device execution. + Must have signature of the form ``expand_fn(tape)``, and return a + single :class:`~.QuantumTape`. If not provided, by default :meth:`Device.expand_fn` + is called. + max_expansion (int): The number of times the internal circuit should be expanded when + executed on a device. Expansion occurs when an operation or measurement is not + supported, and results in a gate decomposition. If any operations in the decomposition + remain unsupported by the device, another expansion occurs. Returns: list[list[float]]: A nested list of tape results. 
Each element in @@ -290,21 +300,22 @@ def cost_fn(params, x): batch_execute = set_shots(device, override_shots)(device.batch_execute) + if expand_fn == "device": + expand_fn = lambda tape: device.expand_fn(tape, max_expansion=max_expansion) + if gradient_fn is None: with qml.tape.Unwrap(*tapes): - res = cache_execute( - batch_execute, cache, return_tuple=False, expand_fn=device.expand_fn - )(tapes) + res = cache_execute(batch_execute, cache, return_tuple=False, expand_fn=expand_fn)( + tapes + ) return res if gradient_fn == "backprop" or interface is None: - return cache_execute(batch_execute, cache, return_tuple=False, expand_fn=device.expand_fn)( - tapes - ) + return cache_execute(batch_execute, cache, return_tuple=False, expand_fn=expand_fn)(tapes) # the default execution function is batch_execute - execute_fn = cache_execute(batch_execute, cache, expand_fn=device.expand_fn) + execute_fn = cache_execute(batch_execute, cache, expand_fn=expand_fn) if gradient_fn == "device": # gradient function is a device method @@ -314,7 +325,7 @@ def cost_fn(params, x): # decompositions with parameter processing is tracked by the # autodiff frameworks. for i, tape in enumerate(tapes): - tapes[i] = device.expand_fn(tape) + tapes[i] = expand_fn(tape) if mode in ("forward", "best"): # replace the forward execution function to return From 4d54374704ad909cab1bc14f1c9c86de775dd9fa Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 21 Sep 2021 23:08:50 +0800 Subject: [PATCH 41/52] update changelog --- doc/releases/changelog-dev.md | 2 +- pennylane/beta/qnode.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 2e04e892cdf..9258d7c2fee 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -81,6 +81,7 @@ `qml.beta.QNode`, and `@qml.beta.qnode`. [(#1642)](https://github.com/PennyLaneAI/pennylane/pull/1642) [(#1646)](https://github.com/PennyLaneAI/pennylane/pull/1646) + [(#1674)](https://github.com/PennyLaneAI/pennylane/pull/1674) It differs from the standard QNode in several ways: @@ -113,7 +114,6 @@ Currently, this beta QNode does not support the following features: - - Circuit decompositions - Non-mutability via the `mutable` keyword argument - Viewing specifications with `qml.specs` - The `reversible` QNode differentiation method diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 218b62cb899..9d145597ddf 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -51,9 +51,10 @@ class QNode: Currently, this beta QNode does not support the following features: - - Circuit decompositions - Non-mutability via the ``mutable`` keyword argument - Viewing specifications with ``qml.specs`` + - The ``reversible`` QNode differentiation method + - The ability to specify a ``dtype`` when using PyTorch and TensorFlow. It is also not tested with the :mod:`~.qnn` module. 
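A minimal sketch of how the new ``expansion_strategy`` keyword argument introduced above is exercised (this mirrors the test added later in this series; the beta QNode import path and the use of ``SingleExcitation`` on ``default.qubit`` are taken from that test):

```python
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

# expansion_strategy="device" requests the full device decomposition at
# tape-construction time; the QNode then disables the default expansion
# during execution by setting execute_kwargs["expand_fn"] to None.
@qml.beta.qnode(dev, diff_method="parameter-shift", expansion_strategy="device")
def circuit(x):
    qml.SingleExcitation(x, wires=[0, 1])
    return qml.expval(qml.PauliX(0))

x = np.array(0.5, requires_grad=True)
circuit.construct([x], {})  # Device.expand_fn is called here, once
assert circuit.expansion_strategy == "device"
assert circuit.execute_kwargs["expand_fn"] is None
```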
From 0da517ffcc26f8a30427e21a4ff49e01a2599144 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 21 Sep 2021 23:13:20 +0800 Subject: [PATCH 42/52] linting --- pennylane/_device.py | 2 +- pennylane/beta/qnode.py | 2 +- pennylane/interfaces/batch/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index 387ecfb2e8d..f8c15dfbf4a 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -586,7 +586,7 @@ def expand_fn(self, circuit, max_expansion=10): """ obs_on_same_wire = len(circuit._obs_sharing_wires) > 0 and not self.supports_observable( "Hamiltonian" - ) + ) # pylint: disable=protected-access ops_not_supported = any( isinstance(op, qml.tape.QuantumTape) # nested tapes must be expanded or not self.supports_operation(op.name) # unsupported ops must be expanded diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 9d145597ddf..d38420ee410 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -541,7 +541,7 @@ def construct(self, args, kwargs): ) if self.expansion_strategy == "device": - self._tape = self.device.expand_fn(self.tape, max_expansion=max_expansion) + self._tape = self.device.expand_fn(self.tape, max_expansion=self.max_expansion) # If the gradient function is a transform, expand the tape so that # all operations are supported by the transform. diff --git a/pennylane/interfaces/batch/__init__.py b/pennylane/interfaces/batch/__init__.py index 075d7ce19e2..ee9953655df 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -109,7 +109,7 @@ def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True, expand_fn=Non if expand_fn is not None: original_fn = fn - def fn(tapes, **kwargs): + def fn(tapes, **kwargs): # pylint: disable=function-redefined tapes = [expand_fn(tape) for tape in tapes] return original_fn(tapes, **kwargs) From 8d0880dc2668715aa151bc3d942a512e758b4e38 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 21 Sep 2021 23:22:53 +0800 Subject: [PATCH 43/52] changelog --- doc/releases/changelog-dev.md | 32 ++++++++++++++++++++++++++++++++ pennylane/_device.py | 7 ++++--- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 9258d7c2fee..9989356b7fa 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -107,6 +107,14 @@ significant performance improvement when executing the QNode on remote quantum hardware. + - When decomposing the circuit, the default decomposition strategy will prioritize + decompositions that result in the smallest number of parametrized operations + required to satisfy the differentiation method. Additional decompositions required + to satisfy the native gate set of the quantum device will be performed later, by the + device at execution time. While this may lead to a slight increase in classical processing, + it significantly reduces the number of circuit evaluations needed to compute + gradients of complex unitaries. + In an upcoming release, this QNode will replace the existing one. If you come across any bugs while using this QNode, please let us know via a [bug report](https://github.com/PennyLaneAI/pennylane/issues/new?assignees=&labels=bug+%3Abug%3A&template=bug_report.yml&title=%5BBUG%5D) @@ -121,6 +129,30 @@ It is also not tested with the `qml.qnn` module. +* Two new methods were added to the Device API, allowing PennyLane devices + increased control over circuit decompositions. 
+ [(#1674)](https://github.com/PennyLaneAI/pennylane/pull/1674) + + - `Device.expand_fn(tape) -> tape`: expands a tape such that it is supported by the device. By + default, performs the standard device-specific gate set decomposition done in the default + QNode. Devices may override this method in order to define their own decomposition logic. + + Note that the numerical result after applying this method should remain unchanged; PennyLane + will assume that the expanded tape returns exactly the same value as the original tape when + executed. + + - `Device.batch_transform(tape) -> (tapes, processing_fn)`: pre-processes the tape in the case + where the device needs to generate multiple circuits to execute from the input circuit. The + requirement of a post-processing function makes this distinct from the `expand_fn` method above. + + By default, this method applies the transform + + .. math:: \left\langle \sum_i c_i h_i\right\rangle \rightarrow \sum_i c_i \left\langle h_i \right\rangle + + if `expval(H)` is present on devices that do not natively support Hamiltonians with + non-commuting terms. + +

<h3>Improvements</h3>

* The `qml.metric_tensor` transform has been improved with regards to diff --git a/pennylane/_device.py b/pennylane/_device.py index f8c15dfbf4a..32a85c67195 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -584,9 +584,10 @@ def expand_fn(self, circuit, max_expansion=10): .QuantumTape: The expanded/decomposed circuit, such that the device will support native """ - obs_on_same_wire = len(circuit._obs_sharing_wires) > 0 and not self.supports_observable( - "Hamiltonian" - ) # pylint: disable=protected-access + obs_on_same_wire = len( + circuit._obs_sharing_wires # pylint: disable=protected-access + ) > 0 and not self.supports_observable("Hamiltonian") + ops_not_supported = any( isinstance(op, qml.tape.QuantumTape) # nested tapes must be expanded or not self.supports_operation(op.name) # unsupported ops must be expanded From 141671a747bf14f4e8cb6b2b16f5c94e650fdf20 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 21 Sep 2021 23:48:45 +0800 Subject: [PATCH 44/52] another test --- tests/beta/test_beta_qnode.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/beta/test_beta_qnode.py b/tests/beta/test_beta_qnode.py index f2f8368b433..aeebf02e2c7 100644 --- a/tests/beta/test_beta_qnode.py +++ b/tests/beta/test_beta_qnode.py @@ -1098,3 +1098,29 @@ def circuit(): ValueError, match="Can only return the expectation of a single Hamiltonian" ): circuit() + + def test_device_expansion_strategy(self, mocker): + """Test that the device expansion strategy performs the device + decomposition at construction time, and not at execution time""" + dev = qml.device("default.qubit", wires=2) + x = np.array(0.5) + + @qnode(dev, diff_method="parameter-shift", expansion_strategy="device") + def circuit(x): + qml.SingleExcitation(x, wires=[0, 1]) + return qml.expval(qml.PauliX(0)) + + assert circuit.expansion_strategy == "device" + assert circuit.execute_kwargs["expand_fn"] is None + + spy_expand = mocker.spy(circuit.device, "expand_fn") + + circuit.construct([x], {}) + assert len(circuit.tape.operations) > 0 + spy_expand.assert_called_once() + + circuit(x) + assert len(spy_expand.call_args_list) == 2 + + qml.grad(circuit)(x) + assert len(spy_expand.call_args_list) == 3 From a5f64245d66fe5c4add0709c983fccd50ad72069 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 22 Sep 2021 01:10:37 +0800 Subject: [PATCH 45/52] Apply suggestions from code review --- doc/releases/changelog-dev.md | 4 ++-- pennylane/beta/qnode.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 55f20d23925..c6ef073700c 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -81,7 +81,7 @@ `qml.beta.QNode`, and `@qml.beta.qnode`. [(#1642)](https://github.com/PennyLaneAI/pennylane/pull/1642) [(#1646)](https://github.com/PennyLaneAI/pennylane/pull/1646) - [(#1674)](https://github.com/PennyLaneAI/pennylane/pull/1674) + [(#1651)](https://github.com/PennyLaneAI/pennylane/pull/1651) It differs from the standard QNode in several ways: @@ -131,7 +131,7 @@ * Two new methods were added to the Device API, allowing PennyLane devices increased control over circuit decompositions. - [(#1674)](https://github.com/PennyLaneAI/pennylane/pull/1674) + [(#1651)](https://github.com/PennyLaneAI/pennylane/pull/1651) - `Device.expand_fn(tape) -> tape`: expands a tape such that it is supported by the device. 
By default, performs the standard device-specific gate set decomposition done in the default diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index d38420ee410..1bacee89eaf 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -168,7 +168,6 @@ class QNode: QNodes can be created by decorating a quantum function: >>> dev = qml.device("default.qubit", wires=1) - >>> @qml.beta.qnode(dev) ... def circuit(x): ... qml.RX(x, wires=0) From af9c4c4b30776cde20b14f549b43e27d26e5f222 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 22 Sep 2021 12:35:15 +0800 Subject: [PATCH 46/52] Apply suggestions from code review --- pennylane/_device.py | 2 +- pennylane/beta/qnode.py | 1 - tests/beta/test_beta_qnode.py | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pennylane/_device.py b/pennylane/_device.py index 32a85c67195..6474c8ba1a7 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -582,7 +582,7 @@ def expand_fn(self, circuit, max_expansion=10): Returns: .QuantumTape: The expanded/decomposed circuit, such that the device - will support native + will natively support all operations. """ obs_on_same_wire = len( circuit._obs_sharing_wires # pylint: disable=protected-access diff --git a/pennylane/beta/qnode.py b/pennylane/beta/qnode.py index 1bacee89eaf..db763cba7a5 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -242,7 +242,6 @@ def __init__( self._tape = None self._qfunc_output = None self._user_gradient_kwargs = gradient_kwargs - self._original_device = device self.gradient_fn = None self.gradient_kwargs = None diff --git a/tests/beta/test_beta_qnode.py b/tests/beta/test_beta_qnode.py index aeebf02e2c7..1791b2a8d77 100644 --- a/tests/beta/test_beta_qnode.py +++ b/tests/beta/test_beta_qnode.py @@ -941,7 +941,7 @@ def circuit(x): def test_no_gradient_expansion(self, mocker): """Test that an unsupported operation with defined gradient recipe is - not expanded for both parameter-shift and finite-differences""" + not expanded""" dev = qml.device("default.qubit", wires=1) class UnsupportedOp(qml.operation.Operation): @@ -986,7 +986,7 @@ def circuit(x): def test_gradient_expansion(self, mocker): """Test that a *supported* operation with no gradient recipe is - expanded for both parameter-shift and finite-differences, but not for execution.""" + expanded when applying the gradient transform, but not for execution.""" dev = qml.device("default.qubit", wires=1) class PhaseShift(qml.PhaseShift): From 94de1469379a7977a49bf91c8ee455fc4ef8f33c Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 22 Sep 2021 22:41:07 +0800 Subject: [PATCH 47/52] Apply suggestions from code review Co-authored-by: Olivia Di Matteo <2068515+glassnotes@users.noreply.github.com> --- doc/releases/changelog-dev.md | 2 +- pennylane/_device.py | 4 +--- pennylane/beta/qnode.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index c6ef073700c..72b3c18797b 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -141,7 +141,7 @@ will assume that the expanded tape returns exactly the same value as the original tape when executed. - - `Device.batch_transform(tape) -> (tapes, processing_fn)`: pre-processes the tape in the case + - `Device.batch_transform(tape) -> (tapes, processing_fn)`: preprocesses the tape in the case where the device needs to generate multiple circuits to execute from the input circuit. 
The requirement of a post-processing function makes this distinct from the `expand_fn` method above. diff --git a/pennylane/_device.py index 6474c8ba1a7..e6c3a3af4e2 100644 --- a/pennylane/_device.py +++ b/pennylane/_device.py @@ -612,7 +612,7 @@ def batch_transform(self, circuit): By default, this method contains logic for generating multiple circuits, one per term, of a circuit that terminates in ``expval(H)``, if the underlying device does not support Hamiltonian expectation values, - or if the device requires finite-shots. + or if the device requires finite shots. .. warning:: @@ -633,8 +633,6 @@ # If the observable contains a Hamiltonian and the device does not # support Hamiltonians, or if the simulation uses finite shots, # split tape into multiple tapes of diagonalizable known observables. - # In future, this logic should be moved to the device - # to allow for more efficient batch execution. supports_hamiltonian = self.supports_observable("Hamiltonian") finite_shots = self.shots is not None diff --git a/pennylane/beta/qnode.py index db763cba7a5..c85cab28327 100644 --- a/pennylane/beta/qnode.py +++ b/pennylane/beta/qnode.py @@ -136,7 +136,7 @@ class QNode: such that all circuit operations are natively supported by the device. The ``gradient`` strategy typically results in a reduction in quantum device evaluations - required during optimization, at the expense of an increase in classical pre-processing. + required during optimization, at the expense of an increase in classical preprocessing. max_expansion (int): The number of times the internal circuit should be expanded when executed on a device. Expansion occurs when an operation or measurement is not supported, and results in a gate decomposition. 
If any operations in the decomposition From 61383be5dcc26ec95f6025d17b7cc736a947cda0 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 22 Sep 2021 22:55:03 +0800 Subject: [PATCH 48/52] Apply suggestions from code review Co-authored-by: anthayes92 <34694788+anthayes92@users.noreply.github.com> --- tests/interfaces/test_batch_autograd_qnode.py | 8 +++++--- tests/interfaces/test_batch_tensorflow_qnode.py | 8 +++++--- tests/interfaces/test_batch_torch_qnode.py | 6 ++++-- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/interfaces/test_batch_autograd_qnode.py b/tests/interfaces/test_batch_autograd_qnode.py index 36c1845561f..847f4326a28 100644 --- a/tests/interfaces/test_batch_autograd_qnode.py +++ b/tests/interfaces/test_batch_autograd_qnode.py @@ -1275,7 +1275,8 @@ def circuit(x, y): @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): """Test that the Hamiltonian is not expanded if there - are non-commuting groups and the number of shots is None""" + are non-commuting groups and the number of shots is None + and the first and second order gradients are correctly evaluated""" if diff_method == "adjoint": pytest.skip("The adjoint method does not yet support Hamiltonians") @@ -1329,7 +1330,8 @@ def test_hamiltonian_expansion_finite_shots( self, dev_name, diff_method, mode, max_diff, mocker ): """Test that the Hamiltonian is expanded if there - are non-commuting groups and the number of shots is finite""" + are non-commuting groups and the number of shots is finite + and the first and second order gradients are correctly evaluated""" if diff_method in ("adjoint", "backprop", "finite-diff"): pytest.skip("The adjoint and backprop methods do not yet support sampling") @@ -1350,7 +1352,7 @@ def circuit(data, weights, coeffs): w = np.array([0.654, -0.734], requires_grad=True) c = np.array([-0.6543, 0.24, 0.54], requires_grad=True) - # # test output + # test output res = circuit(d, w, c) expected = c[2] * np.cos(d[1] + w[1]) - c[1] * np.sin(d[0] + w[0]) * np.sin(d[1] + w[1]) assert np.allclose(res, expected, atol=0.1) diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py index 54fb5b2f50f..41252fb1493 100644 --- a/tests/interfaces/test_batch_tensorflow_qnode.py +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -1146,7 +1146,8 @@ def circuit(x, y): @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): """Test that the Hamiltonian is not expanded if there - are non-commuting groups and the number of shots is None""" + are non-commuting groups and the number of shots is None + and the first and second order gradients are correctly evaluated""" if diff_method == "adjoint": pytest.skip("The adjoint method does not yet support Hamiltonians") @@ -1204,7 +1205,8 @@ def test_hamiltonian_expansion_finite_shots( self, dev_name, diff_method, mode, max_diff, mocker ): """Test that the Hamiltonian is expanded if there - are non-commuting groups and the number of shots is finite""" + are non-commuting groups and the number of shots is finite + and the first and second order gradients are correctly evaluated""" if diff_method in ("adjoint", "backprop", "finite-diff"): pytest.skip("The adjoint and backprop methods do not yet support sampling") @@ -1225,7 +1227,7 @@ def circuit(data, weights, coeffs): w = tf.Variable([0.654, -0.734], dtype=tf.float64) c = 
tf.Variable([-0.6543, 0.24, 0.54], dtype=tf.float64) - # # test output + # test output with tf.GradientTape(persistent=True) as t2: with tf.GradientTape() as t1: res = circuit(d, w, c) diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py index c3898209702..508f62e624f 100644 --- a/tests/interfaces/test_batch_torch_qnode.py +++ b/tests/interfaces/test_batch_torch_qnode.py @@ -1186,7 +1186,8 @@ def circuit(x, y): @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic(self, dev_name, diff_method, mode, max_diff, mocker): """Test that the Hamiltonian is not expanded if there - are non-commuting groups and the number of shots is None""" + are non-commuting groups and the number of shots is None + and the first and second order gradients are correctly evaluated""" if diff_method == "adjoint": pytest.skip("The adjoint method does not yet support Hamiltonians") @@ -1256,7 +1257,8 @@ def test_hamiltonian_expansion_finite_shots( self, dev_name, diff_method, mode, max_diff, mocker ): """Test that the Hamiltonian is expanded if there - are non-commuting groups and the number of shots is finite""" + are non-commuting groups and the number of shots is finite + and the first and second order gradients are correctly evaluated""" if diff_method in ("adjoint", "backprop", "finite-diff"): pytest.skip("The adjoint and backprop methods do not yet support sampling") From d78f5a4b59c54d344dad507a3ed59f638a76600a Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 22 Sep 2021 22:56:12 +0800 Subject: [PATCH 49/52] Update tests/interfaces/test_batch_torch_qnode.py Co-authored-by: anthayes92 <34694788+anthayes92@users.noreply.github.com> --- tests/interfaces/test_batch_torch_qnode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/interfaces/test_batch_torch_qnode.py b/tests/interfaces/test_batch_torch_qnode.py index 508f62e624f..9901afac62c 100644 --- a/tests/interfaces/test_batch_torch_qnode.py +++ b/tests/interfaces/test_batch_torch_qnode.py @@ -1279,7 +1279,7 @@ def circuit(data, weights, coeffs): w = torch.tensor([0.654, -0.734], requires_grad=True, dtype=torch.float64) c = torch.tensor([-0.6543, 0.24, 0.54], requires_grad=True, dtype=torch.float64) - # # test output + # test output res = circuit(d, w, c) expected = c[2] * torch.cos(d[1] + w[1]) - c[1] * torch.sin(d[0] + w[0]) * torch.sin( From 6bdcd212c80e85f78336a2eec0b468d2f8c44951 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 24 Sep 2021 00:38:57 +0800 Subject: [PATCH 50/52] add grad_method=None to all templates --- pennylane/templates/layers/cv_neural_net.py | 1 + pennylane/templates/layers/particle_conserving_u1.py | 1 + pennylane/templates/layers/particle_conserving_u2.py | 1 + pennylane/templates/layers/random.py | 1 + pennylane/templates/layers/simplified_two_design.py | 1 + .../state_preparations/arbitrary_state_preparation.py | 1 + pennylane/templates/state_preparations/basis.py | 1 + .../templates/subroutines/all_singles_doubles.py | 1 + .../templates/subroutines/approx_time_evolution.py | 1 + pennylane/templates/subroutines/arbitrary_unitary.py | 1 + .../subroutines/double_excitation_unitary.py | 11 +++++++++++ pennylane/templates/subroutines/grover.py | 1 + pennylane/templates/subroutines/permute.py | 1 + pennylane/templates/subroutines/qmc.py | 1 + pennylane/templates/subroutines/qpe.py | 1 + .../subroutines/single_excitation_unitary.py | 11 +++++++++++ pennylane/templates/subroutines/uccsd.py | 1 + 17 files changed, 37 insertions(+) diff 
--git a/pennylane/templates/layers/cv_neural_net.py b/pennylane/templates/layers/cv_neural_net.py index cfb067a474a..80f59559157 100644 --- a/pennylane/templates/layers/cv_neural_net.py +++ b/pennylane/templates/layers/cv_neural_net.py @@ -83,6 +83,7 @@ def circuit(): num_params = 11 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__( self, diff --git a/pennylane/templates/layers/particle_conserving_u1.py b/pennylane/templates/layers/particle_conserving_u1.py index 087ac5e8fc0..2d0632a4069 100644 --- a/pennylane/templates/layers/particle_conserving_u1.py +++ b/pennylane/templates/layers/particle_conserving_u1.py @@ -228,6 +228,7 @@ class ParticleConservingU1(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires, init_state=None, do_queue=True, id=None): diff --git a/pennylane/templates/layers/particle_conserving_u2.py b/pennylane/templates/layers/particle_conserving_u2.py index dbf959760b7..36a41a00d08 100644 --- a/pennylane/templates/layers/particle_conserving_u2.py +++ b/pennylane/templates/layers/particle_conserving_u2.py @@ -150,6 +150,7 @@ class ParticleConservingU2(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires, init_state=None, do_queue=True, id=None): diff --git a/pennylane/templates/layers/random.py b/pennylane/templates/layers/random.py index 4d0da0dbd18..e61c08bea9f 100644 --- a/pennylane/templates/layers/random.py +++ b/pennylane/templates/layers/random.py @@ -178,6 +178,7 @@ def circuit_rnd(weights): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__( self, diff --git a/pennylane/templates/layers/simplified_two_design.py b/pennylane/templates/layers/simplified_two_design.py index 01558e652dd..92474b0e661 100644 --- a/pennylane/templates/layers/simplified_two_design.py +++ b/pennylane/templates/layers/simplified_two_design.py @@ -101,6 +101,7 @@ def circuit(init_weights, weights): num_params = 2 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, initial_layer_weights, weights, wires, do_queue=True, id=None): diff --git a/pennylane/templates/state_preparations/arbitrary_state_preparation.py b/pennylane/templates/state_preparations/arbitrary_state_preparation.py index 1c0222eea65..534f28ec59c 100644 --- a/pennylane/templates/state_preparations/arbitrary_state_preparation.py +++ b/pennylane/templates/state_preparations/arbitrary_state_preparation.py @@ -82,6 +82,7 @@ def vqe(weights): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires, do_queue=True, id=None): diff --git a/pennylane/templates/state_preparations/basis.py b/pennylane/templates/state_preparations/basis.py index ba4426a0a92..1e2c402ac12 100644 --- a/pennylane/templates/state_preparations/basis.py +++ b/pennylane/templates/state_preparations/basis.py @@ -51,6 +51,7 @@ def circuit(basis_state): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, basis_state, wires, do_queue=True, id=None): diff --git a/pennylane/templates/subroutines/all_singles_doubles.py b/pennylane/templates/subroutines/all_singles_doubles.py index 35f8173f04e..99b725a141b 100644 --- a/pennylane/templates/subroutines/all_singles_doubles.py +++ b/pennylane/templates/subroutines/all_singles_doubles.py @@ -114,6 +114,7 @@ def circuit(weights, hf_state, singles, doubles): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = 
None def __init__( self, weights, wires, hf_state, singles=None, doubles=None, do_queue=True, id=None diff --git a/pennylane/templates/subroutines/approx_time_evolution.py b/pennylane/templates/subroutines/approx_time_evolution.py index bfde18f723f..7b338d1701b 100644 --- a/pennylane/templates/subroutines/approx_time_evolution.py +++ b/pennylane/templates/subroutines/approx_time_evolution.py @@ -100,6 +100,7 @@ def circuit(time): num_params = 3 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, hamiltonian, time, n, do_queue=True, id=None): diff --git a/pennylane/templates/subroutines/arbitrary_unitary.py b/pennylane/templates/subroutines/arbitrary_unitary.py index e6acb4fde9c..f25b5902d2b 100644 --- a/pennylane/templates/subroutines/arbitrary_unitary.py +++ b/pennylane/templates/subroutines/arbitrary_unitary.py @@ -95,6 +95,7 @@ def arbitrary_nearest_neighbour_interaction(weights, wires): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, weights, wires, do_queue=True, id=None): diff --git a/pennylane/templates/subroutines/double_excitation_unitary.py b/pennylane/templates/subroutines/double_excitation_unitary.py index ba0fa88bd60..24b919253bb 100644 --- a/pennylane/templates/subroutines/double_excitation_unitary.py +++ b/pennylane/templates/subroutines/double_excitation_unitary.py @@ -21,6 +21,15 @@ from pennylane.ops import RZ, RX, CNOT, Hadamard +# Four term gradient recipe for controlled rotations +INV_SQRT2 = 1 / math.sqrt(2) +c1 = INV_SQRT2 * (np.sqrt(2) + 1) / 4 +c2 = INV_SQRT2 * (np.sqrt(2) - 1) / 4 +a = np.pi / 2 +b = 3 * np.pi / 2 +four_term_grad_recipe = ([[c1, 1, a], [-c1, 1, -a], [-c2, 1, b], [c2, 1, -b]],) + + def _layer1(weight, s, r, q, p, set_cnot_wires): r"""Implement the first layer of the circuit to exponentiate the double-excitation operator entering the UCCSD ansatz. 
@@ -476,6 +485,8 @@ def circuit(weight, wires1=None, wires2=None): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = "A" + grad_recipe = four_term_grad_recipe def __init__(self, weight, wires1=None, wires2=None, do_queue=True, id=None): diff --git a/pennylane/templates/subroutines/grover.py b/pennylane/templates/subroutines/grover.py index ff5975259b1..46f2d83ab8c 100644 --- a/pennylane/templates/subroutines/grover.py +++ b/pennylane/templates/subroutines/grover.py @@ -102,6 +102,7 @@ def GroverSearch(num_iterations=1): num_params = 0 num_wires = AnyWires par_domain = None + grad_method = None def __init__(self, wires=None, work_wires=None, do_queue=True, id=None): if (not hasattr(wires, "__len__")) or (len(wires) < 2): diff --git a/pennylane/templates/subroutines/permute.py b/pennylane/templates/subroutines/permute.py index c9f05448a28..a6e514b1fac 100644 --- a/pennylane/templates/subroutines/permute.py +++ b/pennylane/templates/subroutines/permute.py @@ -143,6 +143,7 @@ def circuit() num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, permutation, wires, do_queue=True, id=None): diff --git a/pennylane/templates/subroutines/qmc.py b/pennylane/templates/subroutines/qmc.py index 09e362bf3e5..0b328a2e57b 100644 --- a/pennylane/templates/subroutines/qmc.py +++ b/pennylane/templates/subroutines/qmc.py @@ -330,6 +330,7 @@ def circuit(): num_params = 3 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, probs, func, target_wires, estimation_wires, do_queue=True, id=None): if isinstance(probs, np.ndarray) and probs.ndim != 1: diff --git a/pennylane/templates/subroutines/qpe.py b/pennylane/templates/subroutines/qpe.py index 34c05ea5e76..8dc6d6b5b45 100644 --- a/pennylane/templates/subroutines/qpe.py +++ b/pennylane/templates/subroutines/qpe.py @@ -107,6 +107,7 @@ def circuit(): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__(self, unitary, target_wires, estimation_wires, do_queue=True, id=None): self.target_wires = list(target_wires) diff --git a/pennylane/templates/subroutines/single_excitation_unitary.py b/pennylane/templates/subroutines/single_excitation_unitary.py index 8b6a8c2aed8..1c3aab28bba 100644 --- a/pennylane/templates/subroutines/single_excitation_unitary.py +++ b/pennylane/templates/subroutines/single_excitation_unitary.py @@ -21,6 +21,15 @@ from pennylane.ops import RZ, RX, CNOT, Hadamard +# Four term gradient recipe for controlled rotations +INV_SQRT2 = 1 / math.sqrt(2) +c1 = INV_SQRT2 * (np.sqrt(2) + 1) / 4 +c2 = INV_SQRT2 * (np.sqrt(2) - 1) / 4 +a = np.pi / 2 +b = 3 * np.pi / 2 +four_term_grad_recipe = ([[c1, 1, a], [-c1, 1, -a], [-c2, 1, b], [c2, 1, -b]],) + + class SingleExcitationUnitary(Operation): r"""Circuit to exponentiate the tensor product of Pauli matrices representing the single-excitation operator entering the Unitary Coupled-Cluster Singles @@ -115,6 +124,8 @@ def circuit(weight, wires=None): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = "A" + grad_recipe = four_term_grad_recipe def __init__(self, weight, wires=None, do_queue=True, id=None): if len(wires) < 2: diff --git a/pennylane/templates/subroutines/uccsd.py b/pennylane/templates/subroutines/uccsd.py index b6dc04b4fb7..a430882e945 100644 --- a/pennylane/templates/subroutines/uccsd.py +++ b/pennylane/templates/subroutines/uccsd.py @@ -143,6 +143,7 @@ class UCCSD(Operation): num_params = 1 num_wires = AnyWires par_domain = "A" + grad_method = None def __init__( self, 
weights, wires, s_wires=None, d_wires=None, init_state=None, do_queue=True, id=None From 320b46826d41fbf868d0d0e815f411a98fc30458 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 24 Sep 2021 00:41:14 +0800 Subject: [PATCH 51/52] fix --- pennylane/templates/subroutines/double_excitation_unitary.py | 2 ++ pennylane/templates/subroutines/single_excitation_unitary.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/pennylane/templates/subroutines/double_excitation_unitary.py b/pennylane/templates/subroutines/double_excitation_unitary.py index 24b919253bb..27ac7b96102 100644 --- a/pennylane/templates/subroutines/double_excitation_unitary.py +++ b/pennylane/templates/subroutines/double_excitation_unitary.py @@ -15,6 +15,8 @@ Contains the DoubleExcitationUnitary template. """ # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access +import math + import numpy as np import pennylane as qml from pennylane.operation import Operation, AnyWires diff --git a/pennylane/templates/subroutines/single_excitation_unitary.py b/pennylane/templates/subroutines/single_excitation_unitary.py index 1c3aab28bba..5d12ea12d25 100644 --- a/pennylane/templates/subroutines/single_excitation_unitary.py +++ b/pennylane/templates/subroutines/single_excitation_unitary.py @@ -15,6 +15,8 @@ Contains the SingleExcitationUnitary template. """ # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access +import math + import pennylane as qml from pennylane import numpy as np from pennylane.operation import Operation, AnyWires From bb640fe0ca64fcb98c2c38a28b9f14e91f54ad97 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 24 Sep 2021 00:56:51 +0800 Subject: [PATCH 52/52] fix --- pennylane/templates/subroutines/double_excitation_unitary.py | 2 +- pennylane/templates/subroutines/single_excitation_unitary.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pennylane/templates/subroutines/double_excitation_unitary.py b/pennylane/templates/subroutines/double_excitation_unitary.py index 27ac7b96102..24026e804cb 100644 --- a/pennylane/templates/subroutines/double_excitation_unitary.py +++ b/pennylane/templates/subroutines/double_excitation_unitary.py @@ -486,7 +486,7 @@ def circuit(weight, wires1=None, wires2=None): num_params = 1 num_wires = AnyWires - par_domain = "A" + par_domain = "R" grad_method = "A" grad_recipe = four_term_grad_recipe diff --git a/pennylane/templates/subroutines/single_excitation_unitary.py b/pennylane/templates/subroutines/single_excitation_unitary.py index 5d12ea12d25..97ebdece520 100644 --- a/pennylane/templates/subroutines/single_excitation_unitary.py +++ b/pennylane/templates/subroutines/single_excitation_unitary.py @@ -125,7 +125,7 @@ def circuit(weight, wires=None): num_params = 1 num_wires = AnyWires - par_domain = "A" + par_domain = "R" grad_method = "A" grad_recipe = four_term_grad_recipe
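As a quick numerical sanity check on the four-term recipe registered in the last three patches, a standalone NumPy sketch (``f`` below is a hypothetical stand-in for a QNode: any expectation value containing the two frequencies 1/2 and 1 produced by a controlled-rotation generator; each recipe entry ``[c, m, s]`` contributes ``c * f(m * theta + s)`` to the derivative):

```python
import numpy as np

# The coefficients and shifts defined in single/double_excitation_unitary.py
INV_SQRT2 = 1 / np.sqrt(2)
c1 = INV_SQRT2 * (np.sqrt(2) + 1) / 4
c2 = INV_SQRT2 * (np.sqrt(2) - 1) / 4
a, b = np.pi / 2, 3 * np.pi / 2
recipe = [(c1, 1, a), (-c1, 1, -a), (-c2, 1, b), (c2, 1, -b)]

def f(theta):
    # Hypothetical expectation value with frequency components 1/2 and 1.
    return 0.3 * np.sin(theta / 2) + 0.7 * np.cos(theta)

def df(theta):
    # Analytic derivative of f, for comparison.
    return 0.15 * np.cos(theta / 2) - 0.7 * np.sin(theta)

theta = 0.7213
shift_rule = sum(c * f(m * theta + s) for c, m, s in recipe)
assert np.allclose(shift_rule, df(theta))  # the recipe is exact for this spectrum
```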