diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 9fd9c6c0a00..f992fdebf31 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -4,6 +4,46 @@

New features since last release

+* It is now possible to use TensorFlow's [AutoGraph + mode](https://www.tensorflow.org/guide/function) with QNodes on all devices and with arbitrary + differentiation methods. Previously, AutoGraph mode only supported `diff_method="backprop"`. This + will result in significantly more performant model execution, at the cost of a more expensive + initial compilation. [(#1866)](https://github.com/PennyLaneAI/pennylane/pull/1866) + + Use AutoGraph to convert your QNodes or cost functions into TensorFlow + graphs by decorating them with `@tf.function`: + + ```python + dev = qml.device("lightning.qubit", wires=2) + + @qml.beta.qnode(dev, diff_method="adjoint", interface="tf", max_diff=1) + def circuit(x): + qml.RX(x[0], wires=0) + qml.RY(x[1], wires=1) + return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)), qml.expval(qml.PauliZ(0)) + + @tf.function + def cost(x): + return tf.reduce_sum(circuit(x)) + + x = tf.Variable([0.5, 0.7], dtype=tf.float64) + + with tf.GradientTape() as tape: + loss = cost(x) + + grad = tape.gradient(loss, x) + ``` + + The initial execution may take slightly longer than when executing the circuit in + eager mode; this is because TensorFlow is tracing the function to create the graph. + Subsequent executions will be much more performant. + + Note that using AutoGraph with backprop-enabled devices, such as `default.qubit`, + will yield the best performance. + + For more details, please see the [TensorFlow AutoGraph + documentation](https://www.tensorflow.org/guide/function). + * `qml.math.scatter_element_add` now supports adding multiple values at multiple indices with a single function call, in all interfaces [(#1864)](https://github.com/PennyLaneAI/pennylane/pull/1864) @@ -231,5 +271,6 @@ This release contains contributions from (in alphabetical order): -Guillermo Alonso-Linaje, Benjamin Cordier, Olivia Di Matteo, Jalani Kanem, Ankit Khandelwal, Shumpei Kobayashi, -Christina Lee, Alejandro Montanez, Romain Moyard, Maria Schuld, Jay Soni, David Wierichs +Guillermo Alonso-Linaje, Benjamin Cordier, Olivia Di Matteo, Josh Izaac, +Jalani Kanem, Ankit Khandelwal, Shumpei Kobayashi, Christina Lee, Alejandro Montanez, +Romain Moyard, Maria Schuld, Jay Soni, David Wierichs diff --git a/pennylane/gradients/vjp.py b/pennylane/gradients/vjp.py index ee2a40d332e..33334b14edc 100644 --- a/pennylane/gradients/vjp.py +++ b/pennylane/gradients/vjp.py @@ -20,7 +20,7 @@ from pennylane import math -def compute_vjp(dy, jac): +def compute_vjp(dy, jac, num=None): """Convenience function to compute the vector-Jacobian product for a given vector of gradient outputs and a Jacobian. @@ -29,6 +29,9 @@ def compute_vjp(dy, jac): jac (tensor_like): Jacobian matrix. For an n-dimensional ``dy`` vector, the first n-dimensions of ``jac`` should match the shape of ``dy``. + num (int): The length of the flattened ``dy`` argument. This is an + optional argument, but can be useful to provide if ``dy`` potentially + has no shape (for example, due to tracing or just-in-time compilation). Returns: tensor_like: the vector-Jacobian product @@ -38,10 +41,13 @@ dy_row = math.reshape(dy, [-1]) + if num is None: + num = math.shape(dy_row)[0] + if not isinstance(dy_row, np.ndarray): jac = math.convert_like(jac, dy_row) - jac = math.reshape(jac, [dy_row.shape[0], -1]) + jac = math.reshape(jac, [num, -1]) try: if math.allclose(dy, 0): @@ -156,23 +162,23 @@ def vjp(tape, dy, gradient_fn, gradient_kwargs=None): if num_params == 0: # The tape has no trainable parameters; the VJP # is simply none.
- return [], lambda _: None + return [], lambda _, num=None: None try: if math.allclose(dy, 0): # If the dy vector is zero, then the # corresponding element of the VJP will be zero, # and we can avoid a quantum computation. - return [], lambda _: math.convert_like(np.zeros([num_params]), dy) + return [], lambda _, num=None: math.convert_like(np.zeros([num_params]), dy) except (AttributeError, TypeError): pass gradient_tapes, fn = gradient_fn(tape, **gradient_kwargs) - def processing_fn(results): + def processing_fn(results, num=None): # postprocess results to compute the Jacobian jac = fn(results) - return compute_vjp(dy, jac) + return compute_vjp(dy, jac, num=num) return gradient_tapes, processing_fn @@ -304,10 +310,13 @@ def ansatz(x): processing_fns.append(fn) gradient_tapes.extend(g_tapes) - def processing_fn(results): + def processing_fn(results, nums=None): vjps = [] start = 0 + if nums is None: + nums = [None] * len(tapes) + for t_idx in range(len(tapes)): # extract the correct results from the flat list res_len = reshape_info[t_idx] @@ -315,7 +324,7 @@ def processing_fn(results): start += res_len # postprocess results to compute the VJP - vjp_ = processing_fns[t_idx](res_t) + vjp_ = processing_fns[t_idx](res_t, num=nums[t_idx]) if vjp_ is None: if reduction == "append": diff --git a/pennylane/interfaces/batch/__init__.py b/pennylane/interfaces/batch/__init__.py index a6b6145d8f0..82d8b65b89d 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -31,7 +31,7 @@ "Autograd": ("autograd", "numpy"), # for backwards compatibility "JAX": ("jax", "JAX"), "PyTorch": ("torch", "pytorch"), - "TensorFlow": ("tf", "tensorflow"), + "TensorFlow": ("tf", "tensorflow", "tensorflow-autograph", "tf-autograph"), } """dict[str, str]: maps allowed interface strings to the name of the interface""" @@ -322,6 +322,7 @@ def cost_fn(params, x): # the default execution function is batch_execute execute_fn = cache_execute(batch_execute, cache, expand_fn=expand_fn) + _mode = "backward" if gradient_fn == "device": # gradient function is a device method @@ -338,6 +339,7 @@ def cost_fn(params, x): # both results and gradients execute_fn = set_shots(device, override_shots)(device.execute_and_gradients) gradient_fn = None + _mode = "forward" elif mode == "backward": # disable caching on the forward pass @@ -361,7 +363,13 @@ def cost_fn(params, x): if interface in INTERFACE_NAMES["Autograd"]: from .autograd import execute as _execute elif interface in INTERFACE_NAMES["TensorFlow"]: - from .tensorflow import execute as _execute + import tensorflow as tf + + if not tf.executing_eagerly() or "autograph" in interface: + from .tensorflow_autograph import execute as _execute + else: + from .tensorflow import execute as _execute + elif interface in INTERFACE_NAMES["PyTorch"]: from .torch import execute as _execute elif interface in INTERFACE_NAMES["JAX"]: @@ -379,6 +387,8 @@ def cost_fn(params, x): f"version of {interface_name} to enable the '{interface}' interface." 
) from e - res = _execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff) + res = _execute( + tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff, mode=_mode + ) return res diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index ec75fe6f5ec..9053492a4c9 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -23,7 +23,7 @@ from pennylane import numpy as np -def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2): +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2, mode=None): """Execute a batch of tapes with Autograd parameters on a device. Args: @@ -44,11 +44,14 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d the maximum order of derivatives to support. Increasing this value allows for higher order derivatives to be extracted, at the cost of additional (classical) computational overhead during the backwards pass. + mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). Returns: list[list[float]]: A nested list of tape results. Each element in the returned list corresponds in order to the provided tapes. """ + # pylint: disable=unused-argument for tape in tapes: # set the trainable parameters params = tape.get_parameters(trainable_only=False) diff --git a/pennylane/interfaces/batch/jax.py b/pennylane/interfaces/batch/jax.py index a3a8462daaa..d4e2f1373e1 100644 --- a/pennylane/interfaces/batch/jax.py +++ b/pennylane/interfaces/batch/jax.py @@ -27,7 +27,7 @@ dtype = jnp.float64 -def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=1): +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=1, mode=None): """Execute a batch of tapes with JAX parameters on a device. Args: @@ -48,11 +48,14 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d the maximum order of derivatives to support. Increasing this value allows for higher order derivatives to be extracted, at the cost of additional (classical) computational overhead during the backwards pass. + mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). Returns: list[list[float]]: A nested list of tape results. Each element in the returned list corresponds in order to the provided tapes. """ + # pylint: disable=unused-argument if max_diff > 1: raise ValueError("The JAX interface only supports first order derivatives.") diff --git a/pennylane/interfaces/batch/tensorflow.py b/pennylane/interfaces/batch/tensorflow.py index d96852259fa..96058cd8aec 100644 --- a/pennylane/interfaces/batch/tensorflow.py +++ b/pennylane/interfaces/batch/tensorflow.py @@ -39,7 +39,7 @@ def _compute_vjp(dy, jacs): return vjps -def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2): +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2, mode=None): """Execute a batch of tapes with TensorFlow parameters on a device. Args: @@ -60,11 +60,14 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d the maximum number of derivatives to support. Increasing this value allows for higher order derivatives to be extracted, at the cost of additional (classical) computational overhead during the backwards pass. 
+ mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). Returns: list[list[tf.Tensor]]: A nested list of tape results. Each element in the returned list corresponds in order to the provided tapes. """ + # pylint: disable=unused-argument parameters = [] params_unwrapped = [] diff --git a/pennylane/interfaces/batch/tensorflow_autograph.py b/pennylane/interfaces/batch/tensorflow_autograph.py new file mode 100644 index 00000000000..47bc2151440 --- /dev/null +++ b/pennylane/interfaces/batch/tensorflow_autograph.py @@ -0,0 +1,238 @@ +# Copyright 2018-2021 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains functions for adding the TensorFlow Autograph interface +to a PennyLane Device class. +""" +# pylint: disable=too-many-arguments,too-many-branches,too-many-statements +import numpy as np +import tensorflow as tf + +import pennylane as qml + + +from .tensorflow import _compute_vjp + + +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2, mode=None): + """Execute a batch of tapes with TensorFlow parameters on a device. + + Args: + tapes (Sequence[.QuantumTape]): batch of tapes to execute + device (.Device): Device to use to execute the batch of tapes. + If the device does not provide a ``batch_execute`` method, + by default the tapes will be executed in serial. + execute_fn (callable): The execution function used to execute the tapes + during the forward pass. This function must return a tuple ``(results, jacobians)``. + If ``jacobians`` is an empty list, then ``gradient_fn`` is used to + compute the gradients during the backwards pass. + gradient_kwargs (dict): dictionary of keyword arguments to pass when + determining the gradients of tapes + gradient_fn (callable): the gradient function to use to compute quantum gradients + _n (int): a positive integer used to track nesting of derivatives, for example + if the nth-order derivative is requested. + max_diff (int): If ``gradient_fn`` is a gradient transform, this option specifies + the maximum number of derivatives to support. Increasing this value allows + for higher order derivatives to be extracted, at the cost of additional + (classical) computational overhead during the backwards pass. + mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). + + Returns: + list[list[tf.Tensor]]: A nested list of tape results. Each element in + the returned list corresponds in order to the provided tapes. 
+ """ + all_params = [] + parameters = [] + lens = [] + trainable = [] + output_types = [] + + for tape in tapes: + # store the trainable parameters + params = tape.get_parameters(trainable_only=False) + tape.trainable_params = qml.math.get_trainable_indices(params) + + parameters += [p for i, p in enumerate(params) if i in tape.trainable_params] + all_params += params + trainable += (np.array(list(tape.trainable_params)) + sum(lens)).tolist() + + lens.append(len(params)) + + if tape.all_sampled: + output_types.append(tf.int64) + elif tape.measurements[0].return_type is qml.operation.State: + output_types.append(tf.complex128) + else: + output_types.append(tf.float64) + + if mode == "forward": + output_types += [tf.float64] * len(tapes) + + output_types += [tf.int32] * len(tapes) + + def _nest_params(all_params): + count = 0 + params_unwrapped = [] + + for s in lens: + params_unwrapped.append(all_params[count : count + s]) + count += s + + return params_unwrapped + + def _forward(*all_params): + params_unwrapped = _nest_params(all_params) + output_sizes = [] + + with qml.tape.Unwrap(*tapes, params=params_unwrapped): + # Forward pass: execute the tapes + res, jacs = execute_fn(tapes, **gradient_kwargs) + + for i, _ in enumerate(tapes): + # convert output to TensorFlow tensors + + # For backwards compatibility, we flatten ragged tape outputs + # when there is no sampling + r = np.hstack(res[i]) if res[i].dtype == np.dtype("object") else res[i] + + res[i] = tf.convert_to_tensor(r) + output_sizes.append(tf.size(res[i])) + + return res + jacs + output_sizes + + @tf.custom_gradient + def _execute(*all_params): # pylint:disable=unused-argument + + res = tf.numpy_function(func=_forward, inp=all_params, Tout=output_types) + output_sizes = res[-len(tapes) :] + + if mode == "forward": + jacs = res[len(tapes) : 2 * len(tapes)] + + res = res[: len(tapes)] + + def grad_fn(*dy, **tfkwargs): + """Returns the vector-Jacobian product with given + parameter values and output gradient dy""" + + dy = [qml.math.T(d) for d in dy[: len(res)]] + + if mode == "forward": + # Jacobians were computed on the forward pass (mode="forward") + # No additional quantum evaluations needed; simply compute the VJPs directly. + len_dy = len(dy) + vjps = tf.numpy_function( + func=lambda *args: _compute_vjp(args[:len_dy], args[len_dy:]), + inp=dy + jacs, + Tout=[tf.float64] * len(parameters), + ) + + else: + # Need to compute the Jacobians on the backward pass (accumulation="backward") + if isinstance(gradient_fn, qml.gradients.gradient_transform): + # Gradient function is a gradient transform. + + # Generate and execute the required gradient tapes + if _n == max_diff: + + len_all_params = len(all_params) + + def _backward(*all_params): + dy = all_params[len_all_params:] + all_params = all_params[:len_all_params] + params_unwrapped = _nest_params(all_params) + + with qml.tape.Unwrap(*tapes, params=params_unwrapped): + vjp_tapes, processing_fn = qml.gradients.batch_vjp( + tapes, + dy, + gradient_fn, + reduction=lambda vjps, x: vjps.extend(qml.math.unstack(x)), + gradient_kwargs=gradient_kwargs, + ) + + vjps = processing_fn(execute_fn(vjp_tapes)[0]) + return vjps + + vjps = tf.py_function( + func=_backward, + inp=list(all_params) + dy, + Tout=[tf.float64] * len(parameters), + ) + + else: + vjp_tapes, processing_fn = qml.gradients.batch_vjp( + tapes, + dy, + gradient_fn, + reduction="append", + gradient_kwargs=gradient_kwargs, + ) + + # This is where the magic happens. Note that we call ``execute``. 
+ # This recursion, coupled with the fact that the gradient transforms + # are differentiable, allows for arbitrary order differentiation. + vjps = processing_fn( + execute( + vjp_tapes, + device, + execute_fn, + gradient_fn, + gradient_kwargs, + _n=_n + 1, + max_diff=max_diff, + mode=mode, + ), + nums=output_sizes, + ) + + vjps = tf.unstack(tf.concat(vjps, 0), num=len(parameters)) + + else: + # Gradient function is not a gradient transform + # (e.g., it might be a device method). + # Note that unlike the previous branch: + # + # - there is no recursion here + # - gradient_fn is not differentiable + # + # so we cannot support higher-order derivatives. + len_all_params = len(all_params) + + def _backward(*all_params): + dy = all_params[len_all_params:] + all_params = all_params[:len_all_params] + params_unwrapped = _nest_params(all_params) + + with qml.tape.Unwrap(*tapes, params=params_unwrapped): + vjps = _compute_vjp(dy, gradient_fn(tapes, **gradient_kwargs)) + + return vjps + + vjps = tf.numpy_function( + func=_backward, + inp=list(all_params) + dy, + Tout=[tf.float64] * len(parameters), + ) + + vjps = iter(vjps) + vjps = [next(vjps) if x in trainable else None for x in range(len(all_params))] + + variables = tfkwargs.get("variables", None) + return (vjps, variables) if variables is not None else vjps + + return res, grad_fn + + return _execute(*all_params) diff --git a/pennylane/interfaces/batch/torch.py b/pennylane/interfaces/batch/torch.py index 7333200a412..e68f9d3ba76 100644 --- a/pennylane/interfaces/batch/torch.py +++ b/pennylane/interfaces/batch/torch.py @@ -191,7 +191,7 @@ def backward(ctx, *dy): return (None,) + tuple(vjps) -def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2): +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2, mode=None): """Execute a batch of tapes with Torch parameters on a device. This function may be called recursively, if ``gradient_fn`` is a differentiable @@ -215,11 +215,14 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d the maximum order of derivatives to support. Increasing this value allows for higher order derivatives to be extracted, at the cost of additional (classical) computational overhead during the backwards pass. + mode (str): Whether the gradients should be computed on the forward + pass (``forward``) or the backward pass (``backward``). Returns: list[list[torch.Tensor]]: A nested list of tape results. Each element in the returned list corresponds in order to the provided tapes. """ + # pylint: disable=unused-argument parameters = [] for tape in tapes: # set the trainable parameters diff --git a/tests/interfaces/test_batch_tensorflow_qnode.py b/tests/interfaces/test_batch_tensorflow_qnode.py index d58d3fce307..3215cdd3456 100644 --- a/tests/interfaces/test_batch_tensorflow_qnode.py +++ b/tests/interfaces/test_batch_tensorflow_qnode.py @@ -1329,3 +1329,188 @@ def circuit(): assert isinstance(result, tf.Tensor) assert np.array_equal(result.shape, (3, n_sample)) assert result.dtype == tf.int64 + + +@pytest.mark.parametrize( + "decorator, interface", [(tf.function, "tf"), (lambda x: x, "tf-autograph")] ) +class TestAutograph: + """Tests for Autograph mode. This class is parametrized over the combination: + + 1. interface="tf" with the QNode decorated with @tf.function, and + 2. interface="tf-autograph" with no QNode decorator.
+ + Option (1) checks that if the user enables autograph functionality + in TensorFlow, the new `tf-autograph` interface is automatically applied. + + Option (2) ensures that the tf-autograph interface can be manually applied, + even when in eager execution mode. + """ + + def test_autograph_gradients(self, decorator, interface, tol): + """Test that a parameter-shift QNode can be compiled + using @tf.function, and differentiated""" + dev = qml.device("default.qubit", wires=2) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @decorator + @qnode(dev, diff_method="parameter-shift", interface=interface) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.probs(wires=[0]), qml.probs(wires=[1]) + + with tf.GradientTape() as tape: + p0, p1 = circuit(x, y) + loss = p0[0] + p1[1] + + expected = tf.cos(x / 2) ** 2 + (1 - tf.cos(x) * tf.cos(y)) / 2 + assert np.allclose(loss, expected, atol=tol, rtol=0) + + grad = tape.gradient(loss, [x, y]) + expected = [-tf.sin(x) * tf.sin(y / 2) ** 2, tf.cos(x) * tf.sin(y) / 2] + assert np.allclose(grad, expected, atol=tol, rtol=0) + + def test_autograph_jacobian(self, decorator, interface, tol): + """Test that a parameter-shift vector-valued QNode can be compiled + using @tf.function, and differentiated""" + dev = qml.device("default.qubit", wires=2) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @decorator + @qnode(dev, diff_method="parameter-shift", max_diff=1, interface=interface) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.probs(wires=[0]), qml.probs(wires=[1]) + + with tf.GradientTape() as tape: + res = circuit(x, y) + + expected = np.array( + [ + [tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2], + [(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = tape.jacobian(res, [x, y]) + expected = np.array( + [ + [ + [-tf.sin(x) / 2, tf.sin(x) / 2], + [-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2], + ], + [ + [0, 0], + [-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2], + ], + ] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + @pytest.mark.parametrize("mode", ["forward", "backward"]) + def test_autograph_adjoint(self, mode, decorator, interface, tol): + """Test that an adjoint QNode can be compiled + using @tf.function, and differentiated""" + dev = qml.device("default.qubit", wires=1) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @decorator + @qnode(dev, diff_method="adjoint", interface=interface, mode=mode) + def circuit(x): + qml.RY(x[0], wires=0) + qml.RX(x[1], wires=0) + return qml.expval(qml.PauliZ(0)) + + x = tf.Variable([1.0, 2.0], dtype=tf.float64) + + with tf.GradientTape() as tape: + res = circuit(x) + g = tape.gradient(res, x) + a, b = x * 1.0 + + expected_res = tf.cos(a) * tf.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + def test_autograph_hessian(self, decorator, interface, tol): + """Test that a parameter-shift QNode can be compiled + using @tf.function, and differentiated to second order""" + dev = qml.device("default.qubit", wires=1) + a = tf.Variable(0.543, dtype=tf.float64) + b = tf.Variable(-0.654, dtype=tf.float64) + + @decorator +
@qnode(dev, diff_method="parameter-shift", max_diff=2, interface=interface) + def circuit(x, y): + qml.RY(x, wires=0) + qml.RX(y, wires=0) + return qml.expval(qml.PauliZ(0)) + + with tf.GradientTape() as tape1: + with tf.GradientTape() as tape2: + res = circuit(a, b) + g = tape2.gradient(res, [a, b]) + g = tf.stack(g) + + hess = tf.stack(tape1.gradient(g, [a, b])) + + expected_res = tf.cos(a) * tf.cos(b) + assert np.allclose(res, expected_res, atol=tol, rtol=0) + + expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)] + assert np.allclose(g, expected_g, atol=tol, rtol=0) + + expected_hess = [ + [-tf.cos(a) * tf.cos(b) + tf.sin(a) * tf.sin(b)], + [tf.sin(a) * tf.sin(b) - tf.cos(a) * tf.cos(b)], + ] + assert np.allclose(hess, expected_hess, atol=tol, rtol=0) + + def test_autograph_state(self, decorator, interface, tol): + """Test that a parameter-shift QNode returning a state can be compiled + using @tf.function""" + dev = qml.device("default.qubit", wires=2) + x = tf.Variable(0.543, dtype=tf.float64) + y = tf.Variable(-0.654, dtype=tf.float64) + + @decorator + @qnode(dev, diff_method="parameter-shift", interface=interface) + def circuit(x, y): + qml.RX(x, wires=[0]) + qml.RY(y, wires=[1]) + qml.CNOT(wires=[0, 1]) + return qml.state() + + with tf.GradientTape() as tape: + state = circuit(x, y) + probs = tf.abs(state) ** 2 + loss = probs[0] + + expected = tf.cos(x / 2) ** 2 * tf.cos(y / 2) ** 2 + assert np.allclose(loss, expected, atol=tol, rtol=0) + + def test_autograph_dimension(self, decorator, interface, tol): + """Test sampling works as expected""" + dev = qml.device("default.qubit", wires=2, shots=10) + + @decorator + @qnode(dev, diff_method="parameter-shift", interface=interface) + def circuit(): + qml.Hadamard(wires=[0]) + qml.CNOT(wires=[0, 1]) + return [qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))] + + res = circuit() + + assert res.shape == (2, 10) + assert isinstance(res, tf.Tensor) diff --git a/tests/test_qnode.py b/tests/test_qnode.py index 2583ee35ce7..255d3f05a7e 100644 --- a/tests/test_qnode.py +++ b/tests/test_qnode.py @@ -35,10 +35,7 @@ def test_invalid_interface(self): """Test that an exception is raised for an invalid interface""" dev = qml.device("default.qubit", wires=1) test_interface = "something" - expected_error = ( - fr"Unknown interface {test_interface}\. Interface must be " - r"one of \[None, 'autograd', 'numpy', 'jax', 'JAX', 'torch', 'pytorch', 'tf', 'tensorflow'\]\." - ) + expected_error = fr"Unknown interface {test_interface}\. Interface must be one of" with pytest.raises(qml.QuantumFunctionError, match=expected_error): QNode(dummyfunc, dev, interface="something") @@ -54,10 +51,7 @@ def circuit(x): qml.RX(wires=0) return qml.probs(wires=0) - expected_error = ( - fr"Unknown interface {test_interface}\. Interface must be " - r"one of \[None, 'autograd', 'numpy', 'jax', 'JAX', 'torch', 'pytorch', 'tf', 'tensorflow'\]\." - ) + expected_error = fr"Unknown interface {test_interface}\. Interface must be one of" with pytest.raises(qml.QuantumFunctionError, match=expected_error): circuit.interface = test_interface