Add support for the new VQE workflows to the differentiable batch execution pipeline (#1608)

* Add a tape unwrapping context manager
* Add support for gradient decompositions
* Add custom gradient transform decorator
* Add gradient documentation
* Update pennylane/transforms/batch_transform.py
* Add tests and test files
* Update changelog
* Apply suggestions from code review
* Linting, formatting (black), typo and other minor fixes

Co-authored-by: Olivia Di Matteo <2068515+glassnotes@users.noreply.github.com>
Co-authored-by: David Wierichs <davidwierichs@gmail.com>
Co-authored-by: David Wierichs <davidwierichs@gmail.com>
3 people committed Sep 7, 2021
1 parent d336322 commit b9231d4
Showing 14 changed files with 655 additions and 31 deletions.
1 change: 1 addition & 0 deletions .github/CHANGELOG.md
@@ -271,6 +271,7 @@
[(#1508)](https://github.com/PennyLaneAI/pennylane/pull/1508)
[(#1542)](https://github.com/PennyLaneAI/pennylane/pull/1542)
[(#1549)](https://github.com/PennyLaneAI/pennylane/pull/1549)
[(#1608)](https://github.com/PennyLaneAI/pennylane/pull/1608)

For example:

2 changes: 1 addition & 1 deletion pennylane/_device.py
@@ -527,7 +527,7 @@ def execute_and_gradients(self, circuits, method="jacobian", **kwargs):
# Evaluations and gradients are paired, so that
# devices can re-use the device state for the
# gradient computation (if applicable).
res.append(circuit.execute(self))
res.append(self.batch_execute([circuit])[0])
jacs.append(gradient_method(circuit, **kwargs))

return res, jacs
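The hunk above routes single-circuit evaluation through `batch_execute`, so results come back in the batched format the new pipeline expects, with Jacobians computed alongside. A minimal usage sketch (hypothetical tape; `"adjoint_jacobian"` is assumed here as the device differentiation method):

```python
import pennylane as qml

dev = qml.device("default.qubit", wires=1)

with qml.tape.QuantumTape() as tape:
    qml.RY(0.5, wires=0)
    qml.expval(qml.PauliZ(0))

# Results and Jacobians come back as paired lists, so the device
# can reuse its state between the execution and the gradient pass.
res, jacs = dev.execute_and_gradients([tape], method="adjoint_jacobian")
```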
8 changes: 7 additions & 1 deletion pennylane/devices/default_qubit.py
@@ -472,7 +472,10 @@ def expval(self, observable, shot_range=None, bin_size=None):
if observable.name in ("Hamiltonian", "SparseHamiltonian"):
assert self.shots is None, f"{observable.name} must be used with shots=None"

backprop_mode = not isinstance(self.state, np.ndarray)
backprop_mode = (
not isinstance(self.state, np.ndarray)
or any(not isinstance(d, (float, np.ndarray)) for d in observable.data)
) and observable.name == "Hamiltonian"

if backprop_mode:
# We must compute the expectation value assuming that the Hamiltonian
@@ -513,6 +516,9 @@ def expval(self, observable, shot_range=None, bin_size=None):
coo_matrix.dot(Hmat, coo_matrix(self.state.reshape(len(self.state), 1))),
).toarray()[0]

if observable.name == "Hamiltonian":
res = qml.math.squeeze(res)

return qml.math.real(res)

return super().expval(observable, shot_range=shot_range, bin_size=bin_size)
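The new `backprop_mode` branch assembles the expectation value term by term, ⟨H⟩ = Σᵢ cᵢ ⟨ψ|Oᵢ|ψ⟩, using `qml.math` so the coefficients remain differentiable. A minimal sketch of what this enables (circuit and coefficients invented for illustration; TensorFlow chosen as one supported interface):

```python
import pennylane as qml
import tensorflow as tf

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, interface="tf", diff_method="backprop")
def circuit(coeffs):
    qml.RX(0.4, wires=0)
    H = qml.Hamiltonian(coeffs, [qml.PauliZ(0), qml.PauliX(0)])
    return qml.expval(H)

coeffs = tf.Variable([0.5, -0.2], dtype=tf.float64)
with tf.GradientTape() as tape:
    loss = circuit(coeffs)

# d<H>/dc_i = <O_i>: approximately [cos(0.4), 0.0] here
print(tape.gradient(loss, coeffs))
```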
1 change: 0 additions & 1 deletion pennylane/gradients/__init__.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantum gradient transforms are strategies for computing the gradient of a quantum
circuit that work by **transforming** the quantum circuit into one or more gradient circuits.
These gradient circuits, once executed and post-processed, return the gradient
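As a hedged illustration of the transform pattern this docstring describes (example circuit invented): `qml.gradients.param_shift` maps a single tape to a batch of shifted tapes plus a post-processing function.

```python
import pennylane as qml

with qml.tape.QuantumTape() as tape:
    qml.RX(0.3, wires=0)
    qml.expval(qml.PauliZ(0))

# One tape in, several gradient tapes plus a post-processing fn out
g_tapes, fn = qml.gradients.param_shift(tape)

dev = qml.device("default.qubit", wires=1)
jac = fn(dev.batch_execute(g_tapes))  # approximately -sin(0.3)
```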
28 changes: 21 additions & 7 deletions pennylane/gradients/hamiltonian_grad.py
@@ -25,18 +25,32 @@ def hamiltonian_grad(tape, idx, params=None):
idx (int): index of parameter that we differentiate with respect to
params (array): explicit parameters to set
"""
op, p_idx = tape.get_operation(idx)
new_tape = tape.copy(copy_operations=True)

t_idx = list(tape.trainable_params)[idx]
op = tape._par_info[t_idx]["op"]
p_idx = tape._par_info[t_idx]["p_idx"]

new_tape = tape.copy(copy_operations=True, tape_cls=qml.tape.QuantumTape)
if params is not None:
new_tape.set_parameters(params=params)

new_tape._measurements = [qml.expval(op.ops[p_idx])]
# get position in queue

queue_position = tape.observables.index(op)
new_tape._measurements[queue_position] = qml.expval(op.ops[p_idx])

new_tape._par_info = {}
new_tape._update()

return [new_tape], lambda x: qml.math.squeeze(x)
if len(tape.measurements) > 1:

def processing_fn(results):
res = results[0][queue_position]
zeros = qml.math.zeros_like(res)

final = []
for i, _ in enumerate(tape.measurements):
final.append(res if i == queue_position else zeros)

return qml.math.expand_dims(qml.math.stack(final), 0)

return [new_tape], processing_fn

return [new_tape], lambda x: x
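Since ∂⟨H⟩/∂cᵢ = ⟨Oᵢ⟩ for H = Σᵢ cᵢ Oᵢ, the generated tape simply swaps the Hamiltonian measurement for an expectation of the i-th term. A minimal usage sketch (circuit and coefficients invented for illustration):

```python
import pennylane as qml
from pennylane import numpy as np

H = qml.Hamiltonian(np.array([0.5, -0.2]), [qml.PauliZ(0), qml.PauliX(0)])

with qml.tape.QuantumTape() as tape:
    qml.RX(0.3, wires=0)
    qml.expval(H)

tape.trainable_params = {1, 2}  # the two Hamiltonian coefficients

# d<H>/dc_0 reduces to measuring <PauliZ(0)> on the same circuit
g_tapes, fn = qml.gradients.hamiltonian_grad(tape, idx=0)

dev = qml.device("default.qubit", wires=1)
print(fn(dev.batch_execute(g_tapes)))  # approximately cos(0.3)
```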
43 changes: 28 additions & 15 deletions pennylane/gradients/parameter_shift.py
@@ -15,7 +15,7 @@
This module contains functions for computing the parameter-shift gradient
of a qubit-based quantum tape.
"""
# pylint: disable=protected-access,too-many-arguments
# pylint: disable=protected-access,too-many-arguments,too-many-statements
import numpy as np

import pennylane as qml
@@ -66,19 +66,8 @@ def _get_operation_recipe(tape, t_idx, shift=np.pi / 2):
If the corresponding operation has grad_recipe=None, then
the default two-term parameter-shift rule is assumed.
"""
# get the index of the parameter in the tape
parameter_idx = list(tape.trainable_params)[t_idx]

# get the corresponding operation
op = tape._par_info[parameter_idx]["op"]

# get the corresponding operation parameter index
# (that is, index of the parameter within the operation)
op_p_idx = tape._par_info[parameter_idx]["p_idx"]

# return the parameter-shift gradient for that
# operation parameter.
return op.get_parameter_shift(op_p_idx, shift=shift)
op, p_idx = tape.get_operation(t_idx)
return op.get_parameter_shift(p_idx, shift=shift)


def _process_gradient_recipe(gradient_recipe, tol=1e-10):
@@ -150,19 +139,40 @@ def expval_param_shift(tape, argnum=None, shift=np.pi / 2, gradient_recipes=None
shapes = []
unshifted_coeffs = []

fns = []

for idx, _ in enumerate(tape.trainable_params):

if idx not in argnum:
# parameter has zero gradient
shapes.append(0)
gradient_coeffs.append([])
fns.append(None)
continue

op, _ = tape.get_operation(idx)

if op.name == "Hamiltonian":
# operation is a Hamiltonian
if op.return_type is not qml.operation.Expectation:
raise ValueError(
"Can only differentiate Hamiltonian "
f"coefficients for expectations, not {op.return_type.value}"
)

g_tapes, h_fn = qml.gradients.hamiltonian_grad(tape, idx)
gradient_tapes.extend(g_tapes)
shapes.append(1)
gradient_coeffs.append(np.array([1.0]))
fns.append(h_fn)
continue

# get the gradient recipe for the trainable parameter
recipe = gradient_recipes[argnum.index(idx)]
recipe = recipe or _get_operation_recipe(tape, idx, shift=shift)
recipe = _process_gradient_recipe(recipe)
coeffs, multipliers, shifts = recipe
fns.append(None)

if shifts[0] == 0 and multipliers[0] == 1:
# Gradient recipe includes a term with zero shift.
@@ -189,7 +199,7 @@ def processing_fn(results):
start = 1 if unshifted_coeffs and f0 is None else 0
r0 = f0 or results[0]

for i, s in enumerate(shapes):
for i, (s, f) in enumerate(zip(shapes, fns)):

if s == 0:
# parameter has zero gradient
Expand All @@ -200,6 +210,9 @@ def processing_fn(results):
res = results[start : start + s]
start = start + s

if f is not None:
res = f(res)

# compute the linear combination of results and coefficients
res = qml.math.stack(res)
g = qml.math.tensordot(res, qml.math.convert_like(gradient_coeffs[i], res), [[0], [0]])
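For context, the standard two-term recipe that `processing_fn` evaluates through `tensordot` is ∂f/∂θ = c·[f(θ + s) − f(θ − s)] with c = 1/2 and s = π/2. A self-contained numeric sketch (a toy classical `f` stands in for tape execution):

```python
import numpy as np

def two_term_shift(f, theta, c=0.5, s=np.pi / 2):
    # df/dtheta = c * [f(theta + s) - f(theta - s)]
    results = np.stack([f(theta + s), f(theta - s)])
    coeffs = np.array([c, -c])
    # the same contraction qml.math.tensordot performs above
    return np.tensordot(results, coeffs, axes=[[0], [0]])

# For f = sin the rule is exact: the derivative is cos
assert np.allclose(two_term_shift(np.sin, 0.3), np.cos(0.3))
```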
2 changes: 1 addition & 1 deletion pennylane/interfaces/batch/autograd.py
@@ -190,7 +190,7 @@ def grad_fn(dy):
gradient_kwargs=gradient_kwargs,
)

vjps = processing_fn(execute_fn(vjp_tapes)[0])
vjps = processing_fn(execute_fn(vjp_tapes)[0])

else:
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
2 changes: 1 addition & 1 deletion pennylane/interfaces/batch/tensorflow.py
@@ -122,7 +122,7 @@ def grad_fn(*dy, **tfkwargs):
gradient_kwargs=gradient_kwargs,
)

vjps = processing_fn(execute_fn(vjp_tapes)[0])
vjps = processing_fn(execute_fn(vjp_tapes)[0])

else:
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
23 changes: 23 additions & 0 deletions pennylane/tape/tape.py
@@ -738,6 +738,29 @@ def trainable_params(self, param_indices):

self._trainable_params = param_indices

def get_operation(self, idx):
"""Returns the trainable operation, and the corresponding operation argument
index, for a specified trainable parameter index.

Args:
idx (int): the trainable parameter index

Returns:
tuple[.Operation, int]: tuple containing the corresponding
operation, and an integer representing the argument index,
for the provided trainable parameter.
"""
# get the index of the parameter in the tape
t_idx = list(self.trainable_params)[idx]

# get the corresponding operation
op = self._par_info[t_idx]["op"]

# get the corresponding operation parameter index
# (that is, index of the parameter within the operation)
p_idx = self._par_info[t_idx]["p_idx"]
return op, p_idx

def get_parameters(self, trainable_only=True, **kwargs): # pylint:disable=unused-argument
"""Return the parameters incident on the tape operations.
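A minimal sketch of the new helper in use (circuit invented for illustration): it maps a trainable-parameter index back to the operation owning that parameter and the argument position within it.

```python
import pennylane as qml

with qml.tape.QuantumTape() as tape:
    qml.RX(0.1, wires=0)
    qml.Rot(0.2, 0.3, 0.4, wires=0)
    qml.expval(qml.PauliZ(0))

tape.trainable_params = {0, 2}  # RX angle and the second Rot angle

op, p_idx = tape.get_operation(1)
print(op.name, p_idx)  # Rot 1
```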
1 change: 0 additions & 1 deletion pennylane/transforms/classical_jacobian.py
@@ -96,7 +96,6 @@ def _jacobian(*args, **kwargs): # pylint: disable=unused-argument

def _jacobian(*args, **kwargs):
with tf.GradientTape() as tape:
tape.watch(args)
gate_params = classical_preprocessing(*args, **kwargs)

return tape.jacobian(gate_params, args)
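Dropping `tape.watch(args)` assumes the QNode arguments arrive as trainable `tf.Variable`s, which `tf.GradientTape` records automatically. A standalone sketch of that TensorFlow behaviour:

```python
import tensorflow as tf

x = tf.Variable([0.1, 0.2])

with tf.GradientTape() as tape:
    # stands in for classical_preprocessing; no explicit watch needed
    gate_params = tf.sin(x) * 2.0

print(tape.jacobian(gate_params, x))  # diagonal of 2 * cos(x)
```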
