Fix qml.gradients docs examples (#5596)
- [x] Fix last open bug or open issue


**Context:**
Docs get out of sync over time.
In particular, the output shape of gradient transforms applied to QNodes
was changed in #4945 but not updated in the docs.
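
As a minimal sketch of that shape change (a hypothetical three-parameter circuit, not the exact one from the docs): since #4945, a gradient transform applied to a QNode with a single trainable array argument returns one stacked tensor instead of a per-parameter tuple.

```python
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit")

@qml.qnode(dev)
def circuit(weights):
    qml.RX(weights[0], wires=0)
    qml.RY(weights[1], wires=0)
    qml.RX(weights[2], wires=0)
    return qml.probs(wires=0)

weights = np.array([0.1, 0.2, 0.3], requires_grad=True)

# One (2, 3) tensor: a row per probability entry, a column per trainable
# parameter. Before #4945 this was a 3-tuple of length-2 tensors.
jac = qml.gradients.param_shift(circuit)(weights)
print(jac.shape)  # (2, 3)
```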

**Description of the Change:**
Update examples in docs of `qml.gradients` module.

Also update the `qml.kernels` docs with a few very small changes.

**Benefits:**

**Possible Drawbacks:**

**Related GitHub Issues:**
dwierichs committed Apr 30, 2024
1 parent d95390f commit ca31c43
Showing 16 changed files with 166 additions and 167 deletions.
27 changes: 13 additions & 14 deletions pennylane/gradients/__init__.py
@@ -98,7 +98,7 @@
.. code-block:: python
dev = qml.device("default.qubit", wires=2, shots=1000)
dev = qml.device("default.qubit", shots=1000)
@qml.qnode(dev, interface="tf")
def circuit(weights):
@@ -143,7 +143,7 @@ def circuit(weights):
.. code-block:: python
- dev = qml.device("default.qubit", wires=2)
+ dev = qml.device("default.qubit")
@qml.qnode(dev)
def circuit(weights):
@@ -157,9 +157,8 @@ def circuit(weights):
>>> circuit(weights)
tensor([0.9658079, 0.0341921], requires_grad=True)
>>> qml.gradients.param_shift(circuit)(weights)
- (tensor([-0.04673668, 0.04673668], requires_grad=True),
- tensor([-0.09442394, 0.09442394], requires_grad=True),
- tensor([-0.14409127, 0.14409127], requires_grad=True))
+ tensor([[-0.04673668, -0.09442394, -0.14409127],
+ [ 0.04673668, 0.09442394, 0.14409127]], requires_grad=True)
Comparing this to autodifferentiation:
@@ -173,7 +172,7 @@ def circuit(weights):
.. code-block:: python
- dev = qml.device("default.qubit", wires=2)
+ dev = qml.device("default.qubit")
@qml.gradients.param_shift
@qml.qnode(dev)
@@ -185,9 +184,8 @@ def decorated_circuit(weights):
return qml.probs(wires=1)
>>> decorated_circuit(weights)
- (tensor([-0.04673668, 0.04673668], requires_grad=True),
- tensor([-0.09442394, 0.09442394], requires_grad=True),
- tensor([-0.14409127, 0.14409127], requires_grad=True))
+ tensor([[-0.04673668, -0.09442394, -0.14409127],
+ [ 0.04673668, 0.09442394, 0.14409127]], requires_grad=True)
.. note::
@@ -203,6 +201,9 @@ def decorated_circuit(weights):
when applying the transform:
>>> qml.gradients.param_shift(circuit, hybrid=False)(weights)
(tensor([-0.04673668, 0.04673668], requires_grad=True),
tensor([-0.09442394, 0.09442394], requires_grad=True),
tensor([-0.14409127, 0.14409127], requires_grad=True))
Differentiating gradient transforms and higher-order derivatives
@@ -213,7 +214,7 @@ def decorated_circuit(weights):
.. code-block:: python
- dev = qml.device("default.qubit", wires=2)
+ dev = qml.device("default.qubit")
@qml.qnode(dev)
def circuit(weights):
@@ -227,9 +228,7 @@ def circuit(weights):
>>> circuit(weights)
tensor(0.9316158, requires_grad=True)
>>> qml.gradients.param_shift(circuit)(weights) # gradient
- (tensor(-0.09347337, requires_grad=True),
- tensor(-0.18884787, requires_grad=True),
- tensor(-0.28818254, requires_grad=True))
+ tensor([-0.09347337, -0.18884787, -0.28818254], requires_grad=True)
>>> def stacked_output(weights):
... return qml.numpy.stack(qml.gradients.param_shift(circuit)(weights))
>>> qml.jacobian(stacked_output)(weights) # hessian
@@ -298,7 +297,7 @@ def circuit(weights):
The output tapes can then be evaluated and post-processed to retrieve
the gradient:
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> fn(qml.execute(gradient_tapes, dev, None))
(tensor(-0.09347337, requires_grad=True),
tensor(-0.18884787, requires_grad=True),
3 changes: 1 addition & 2 deletions pennylane/gradients/classical_jacobian.py
@@ -61,8 +61,7 @@ def classical_jacobian(qnode, argnum=None, expand_fn=None, trainable_only=True):
>>> print(cjac)
[[1. 0. 0. ]
[0.2 0. 0. ]
- [0. 0. 0. ]
- [0. 1.2 0. ]
+ [0. 2. 0. ]
[0. 0. 1. ]]
The returned Jacobian has rows corresponding to gate arguments, and columns
44 changes: 19 additions & 25 deletions pennylane/gradients/finite_difference.py
@@ -253,7 +253,7 @@ def finite_diff(
This transform can be registered directly as the quantum gradient transform
to use during autodifferentiation:
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> @qml.qnode(dev, interface="autograd", diff_method="finite-diff")
... def circuit(params):
... qml.RX(params[0], wires=0)
@@ -270,7 +270,7 @@ def finite_diff(
post-processing.
>>> import jax
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> @qml.qnode(dev, interface="jax", diff_method="finite-diff")
... def circuit(params):
... qml.RX(params[0], wires=0)
@@ -280,7 +280,7 @@ def finite_diff(
>>> params = jax.numpy.array([0.1, 0.2, 0.3])
>>> jax.jacobian(circuit)(params)
(Array([-0.38751727, -0.18884793, -0.3835571 ], dtype=float32),
- Array([0.6991687 , 0.34072432, 0.6920237 ], dtype=float32))
+ Array([0.6991687 , 0.34072432, 0.6920237 ], dtype=float32))
.. details::
@@ -299,12 +299,8 @@ def finite_diff(
... return qml.expval(qml.Z(0)), qml.var(qml.Z(0))
>>> params = np.array([0.1, 0.2, 0.3], requires_grad=True)
>>> qml.gradients.finite_diff(circuit)(params)
- ((tensor(-0.38751724, requires_grad=True),
- tensor(-0.18884792, requires_grad=True),
- tensor(-0.38355709, requires_grad=True)),
- (tensor(0.69916868, requires_grad=True),
- tensor(0.34072432, requires_grad=True),
- tensor(0.69202366, requires_grad=True)))
+ (tensor([-0.38751724, -0.18884792, -0.38355708], requires_grad=True),
+ tensor([0.69916868, 0.34072432, 0.69202365], requires_grad=True))
This quantum gradient transform can also be applied to low-level
:class:`~.QuantumTape` objects. This will result in no implicit quantum
@@ -317,9 +313,9 @@ def finite_diff(
>>> gradient_tapes, fn = qml.gradients.finite_diff(tape)
>>> gradient_tapes
[<QuantumTape: wires=[0], params=3>,
- <QuantumTape: wires=[0], params=3>,
- <QuantumTape: wires=[0], params=3>,
- <QuantumTape: wires=[0], params=3>]
+ <QuantumScript: wires=[0], params=3>,
+ <QuantumScript: wires=[0], params=3>,
+ <QuantumScript: wires=[0], params=3>]
This can be useful if the underlying circuits representing the gradient
computation need to be analyzed.
@@ -338,32 +334,30 @@ def finite_diff(
The output tapes can then be evaluated and post-processed to retrieve the gradient:
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> fn(qml.execute(gradient_tapes, dev, None))
((tensor(-0.56464251, requires_grad=True),
- tensor(-0.56464251, requires_grad=True),
- tensor(-0.56464251, requires_grad=True)),
- (tensor(0.93203912, requires_grad=True),
- tensor(0.93203912, requires_grad=True),
- tensor(0.93203912, requires_grad=True)))
+ tensor(-0.56464251, requires_grad=True),
+ tensor(-0.56464251, requires_grad=True)),
+ (tensor(0.93203912, requires_grad=True),
+ tensor(0.93203912, requires_grad=True),
+ tensor(0.93203912, requires_grad=True)))
This gradient transform is compatible with devices that use shot vectors for execution.
>>> shots = (10, 100, 1000)
- >>> dev = qml.device("default.qubit", wires=2, shots=shots)
+ >>> dev = qml.device("default.qubit", shots=shots)
>>> @qml.qnode(dev)
... def circuit(params):
... qml.RX(params[0], wires=0)
... qml.RY(params[1], wires=0)
... qml.RX(params[2], wires=0)
... return qml.expval(qml.Z(0)), qml.var(qml.Z(0))
>>> params = np.array([0.1, 0.2, 0.3], requires_grad=True)
- >>> qml.gradients.finite_diff(circuit, h=10e-2)(params)
- (((array(-2.), array(-2.), array(0.)), (array(3.6), array(3.6), array(0.))),
- ((array(1.), array(0.4), array(1.)),
- (array(-1.62), array(-0.624), array(-1.62))),
- ((array(-0.48), array(-0.34), array(-0.46)),
- (array(0.84288), array(0.6018), array(0.80868))))
+ >>> qml.gradients.finite_diff(circuit, h=0.1)(params)
+ ((array([-2., -2., 0.]), array([3.6, 3.6, 0. ])),
+ (array([1. , 0.2, 0.4]), array([-1.78 , -0.34 , -0.688])),
+ (array([-0.9 , -0.22, -0.48]), array([1.5498 , 0.3938 , 0.84672])))
The outermost tuple contains results corresponding to each element of the shot vector.
"""
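As background to the `finite_diff` examples above: with a first-order forward strategy (an assumption about the defaults here), each derivative is estimated by the textbook difference quotient. A minimal NumPy sketch of that rule, independent of PennyLane:

```python
import numpy as np

def forward_diff(f, x, h=1e-7):
    """First-order forward finite difference of f at x (sketch)."""
    grad = np.zeros_like(x)
    fx = f(x)
    for i in range(len(x)):
        shifted = x.copy()
        shifted[i] += h
        grad[i] = (f(shifted) - fx) / h  # (f(x + h*e_i) - f(x)) / h
    return grad

# Check against the analytic gradient of f(p) = cos(p0) * cos(p1)
f = lambda p: np.cos(p[0]) * np.cos(p[1])
x = np.array([0.1, 0.2])
print(forward_diff(f, x))
print([-np.sin(0.1) * np.cos(0.2), -np.cos(0.1) * np.sin(0.2)])
```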
9 changes: 6 additions & 3 deletions pennylane/gradients/general_shift_rules.py
@@ -310,7 +310,7 @@ def generate_shift_rule(frequencies, shifts=None, order=1):
[ 0.5 , -3.14159265]])
This corresponds to the shift rule
- :math:`\frac{\partial^2 f}{\partial phi^2} = \frac{1}{2} \left[f(\phi) - f(\phi-\pi)\right]`.
+ :math:`\frac{\partial^2 f}{\partial \phi^2} = \frac{1}{2} \left[f(\phi) - f(\phi-\pi)\right]`.
"""
frequencies = tuple(f for f in frequencies if f > 0)
rule = _get_shift_rule(frequencies, shifts=shifts)
@@ -368,9 +368,12 @@ def generate_multi_shift_rule(frequencies, shifts=None, orders=None):
.. math::
\begin{align*}
\frac{\partial^2 f}{\partial x\partial y} &= \frac{1}{4}
- \left[f(x+\pi/2, y+\pi/2) - f(x+\pi/2, y-\pi/2)\\
- &~~~- f(x-\pi/2, y+\pi/2) + f(x-\pi/2, y-\pi/2) \right].
+ [f(x+\pi/2, y+\pi/2) - f(x+\pi/2, y-\pi/2)\\
+ &\phantom{\frac{1}{4}[}-f(x-\pi/2, y+\pi/2) + f(x-\pi/2, y-\pi/2) ].
\end{align*}
"""
rules = []
shifts = shifts or [None] * len(frequencies)
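The corrected two-parameter rule above can be sanity-checked numerically. A minimal sketch using f(x, y) = cos(x)cos(y) as a stand-in for a cost function with frequency-1 parameters, for which the rule is exact and the mixed partial is sin(x)sin(y):

```python
import numpy as np

f = lambda x, y: np.cos(x) * np.cos(y)
x, y, s = 0.4, -1.1, np.pi / 2

# d^2 f / dx dy via the second-order mixed shift rule from the docstring above
rule = 0.25 * (f(x + s, y + s) - f(x + s, y - s)
               - f(x - s, y + s) + f(x - s, y - s))
print(np.isclose(rule, np.sin(x) * np.sin(y)))  # True
```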
59 changes: 30 additions & 29 deletions pennylane/gradients/hadamard_gradient.py
@@ -113,17 +113,18 @@ def hadamard_grad(
to use during autodifferentiation:
>>> import jax
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> @qml.qnode(dev, interface="jax", diff_method="hadamard")
... def circuit(params):
... qml.RX(params[0], wires=0)
... qml.RY(params[1], wires=0)
... qml.RX(params[2], wires=0)
- ... return qml.expval(qml.Z(0)), qml.var(qml.Z(0))
+ ... return qml.expval(qml.Z(0)), qml.probs(wires=0)
>>> params = jax.numpy.array([0.1, 0.2, 0.3])
>>> jax.jacobian(circuit)(params)
- (Array([-0.38751727, -0.18884793, -0.3835571 ], dtype=float32),
- Array([0.6991687 , 0.34072432, 0.6920237 ], dtype=float32))
+ (Array([-0.3875172 , -0.18884787, -0.38355704], dtype=float64),
+ Array([[-0.1937586 , -0.09442394, -0.19177852],
+ [ 0.1937586 , 0.09442394, 0.19177852]], dtype=float64))
.. details::
:title: Usage Details
@@ -133,7 +134,7 @@ def hadamard_grad(
as the ``diff_method`` argument of the QNode decorator, and differentiating with your
preferred machine learning framework.
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> @qml.qnode(dev)
... def circuit(params):
... qml.RX(params[0], wires=0)
@@ -142,23 +143,21 @@ def hadamard_grad(
... return qml.expval(qml.Z(0))
>>> params = np.array([0.1, 0.2, 0.3], requires_grad=True)
>>> qml.gradients.hadamard_grad(circuit)(params)
- (tensor([-0.3875172], requires_grad=True),
- tensor([-0.18884787], requires_grad=True),
- tensor([-0.38355704], requires_grad=True))
+ tensor([-0.3875172 , -0.18884787, -0.38355704], requires_grad=True)
This quantum gradient transform can also be applied to low-level
:class:`~.QuantumTape` objects. This will result in no implicit quantum
device evaluation. Instead, the processed tapes, and post-processing
function, which together define the gradient are directly returned:
- >>> ops = [qml.RX(p, wires=0) for p in params]
+ >>> ops = [qml.RX(params[0], 0), qml.RY(params[1], 0), qml.RX(params[2], 0)]
>>> measurements = [qml.expval(qml.Z(0))]
>>> tape = qml.tape.QuantumTape(ops, measurements)
>>> gradient_tapes, fn = qml.gradients.hadamard_grad(tape)
>>> gradient_tapes
- [<QuantumTape: wires=[0, 1], params=3>,
- <QuantumTape: wires=[0, 1], params=3>,
- <QuantumTape: wires=[0, 1], params=3>]
+ [<QuantumScript: wires=[0, 1], params=3>,
+ <QuantumScript: wires=[0, 1], params=3>,
+ <QuantumScript: wires=[0, 1], params=3>]
This can be useful if the underlying circuits representing the gradient
computation need to be analyzed.
@@ -177,14 +176,16 @@ def hadamard_grad(
The output tapes can then be evaluated and post-processed to retrieve the gradient:
- >>> dev = qml.device("default.qubit", wires=2)
+ >>> dev = qml.device("default.qubit")
>>> fn(qml.execute(gradient_tapes, dev, None))
- (array(-0.3875172), array(-0.18884787), array(-0.38355704))
+ (tensor(-0.3875172, requires_grad=True),
+ tensor(-0.18884787, requires_grad=True),
+ tensor(-0.38355704, requires_grad=True))
This transform can be registered directly as the quantum gradient transform
to use during autodifferentiation:
>>> dev = qml.device("default.qubit", wires=3)
>>> dev = qml.device("default.qubit")
>>> @qml.qnode(dev, interface="jax", diff_method="hadamard")
... def circuit(params):
... qml.RX(params[0], wires=0)
Expand All @@ -193,7 +194,7 @@ def hadamard_grad(
... return qml.expval(qml.Z(0))
>>> params = jax.numpy.array([0.1, 0.2, 0.3])
>>> jax.jacobian(circuit)(params)
- [-0.3875172 -0.18884787 -0.38355704]
+ Array([-0.3875172 , -0.18884787, -0.38355704], dtype=float64)
If you use custom wires on your device, you need to pass an auxiliary wire.
@@ -207,24 +208,24 @@ def hadamard_grad(
... return qml.expval(qml.Z("a"))
>>> params = jax.numpy.array([0.1, 0.2, 0.3])
>>> jax.jacobian(circuit)(params)
- [-0.3875172 -0.18884787 -0.38355704]
+ Array([-0.3875172 , -0.18884787, -0.38355704], dtype=float64)
.. note::
``hadamard_grad`` will decompose the operations that are not in the list of supported operations.
- - ``pennylane.RX``
- - ``pennylane.RY``
- - ``pennylane.RZ``
- - ``pennylane.Rot``
- - ``pennylane.PhaseShift``
- - ``pennylane.U1``
- - ``pennylane.CRX``
- - ``pennylane.CRY``
- - ``pennylane.CRZ``
- - ``pennylane.IsingXX``
- - ``pennylane.IsingYY``
- - ``pennylane.IsingZZ``
+ - :class:`~.pennylane.RX`
+ - :class:`~.pennylane.RY`
+ - :class:`~.pennylane.RZ`
+ - :class:`~.pennylane.Rot`
+ - :class:`~.pennylane.PhaseShift`
+ - :class:`~.pennylane.U1`
+ - :class:`~.pennylane.CRX`
+ - :class:`~.pennylane.CRY`
+ - :class:`~.pennylane.CRZ`
+ - :class:`~.pennylane.IsingXX`
+ - :class:`~.pennylane.IsingYY`
+ - :class:`~.pennylane.IsingZZ`
The expansion will fail if a suitable decomposition in terms of supported operations is not found.
The number of trainable parameters may increase due to the decomposition.
