Remove abs from DefaultQubit #2057

Merged: 7 commits, Dec 22, 2021
doc/releases/changelog-dev.md (6 changes: 5 additions & 1 deletion)
@@ -134,6 +134,10 @@

<h3>Bug fixes</h3>

+* Fixes a bug in `DefaultQubit` where the second derivative of QNodes was
+  incorrect at parameter values for which state-vector amplitudes vanish.
+  [(#2057)](https://github.com/PennyLaneAI/pennylane/pull/2057)
+
* Fixes a bug where PennyLane didn't require v0.20.0 of PennyLane-Lightning,
but raised an error with versions of Lightning earlier than v0.20.0 due to
the new batch execution pipeline.
@@ -159,4 +163,4 @@

This release contains contributions from (in alphabetical order):

-Juan Miguel Arrazola, Ali Asadi, Esther Cruz, Olivia Di Matteo, Diego Guala, Ankit Khandelwal, Antal Száva, David Wierichs, Shaoming Zhang
+Juan Miguel Arrazola, Ali Asadi, Esther Cruz, Olivia Di Matteo, Diego Guala, Ankit Khandelwal, Antal Száva, David Wierichs, Shaoming Zhang
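
Why removing `abs` matters: automatic-differentiation frameworks conventionally assign `abs` a zero derivative at the origin, so a probability computed as `abs(amplitude) ** 2` loses its curvature exactly where an amplitude vanishes. A minimal sketch of the presumed root cause (illustrative code, not part of the PR):

    import autograd.numpy as np
    from autograd import grad

    def prob_via_abs(a):
        # Old DefaultQubit-style probability: non-smooth route through abs.
        return np.abs(a) ** 2

    def prob_via_square(a):
        # Equivalent for real amplitudes: smooth polynomial route.
        return a ** 2

    # The true second derivative of a**2 is 2 everywhere, including a = 0,
    # but autograd's abs is assigned a zero derivative at the origin.
    print(grad(grad(prob_via_abs))(0.0))     # expected: 0.0 (curvature lost)
    print(grad(grad(prob_via_square))(0.0))  # expected: 2.0 (correct)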
pennylane/devices/default_qubit.py (5 changes: 4 additions & 1 deletion)
@@ -789,5 +789,8 @@ def analytic_probability(self, wires=None):
        if self._state is None:
            return None

-       prob = self.marginal_prob(self._abs(self._flatten(self._state)) ** 2, wires)
+       flat_state = self._flatten(self._state)
+       real_state = self._real(flat_state)
+       imag_state = self._imag(flat_state)
+       prob = self.marginal_prob(real_state ** 2 + imag_state ** 2, wires)
        return prob
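
The replacement is value-equivalent, since |ψ|² = Re(ψ)² + Im(ψ)² for every complex amplitude; the right-hand side is a polynomial in the state's components and therefore smooth at ψ = 0. A quick numerical sanity check (a standalone sketch, not the device internals):

    import numpy as np

    rng = np.random.default_rng(0)
    state = rng.normal(size=4) + 1j * rng.normal(size=4)
    state /= np.linalg.norm(state)

    # Same probabilities, but the right-hand side stays smooth at zero amplitudes.
    assert np.allclose(np.abs(state) ** 2, np.real(state) ** 2 + np.imag(state) ** 2)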
pennylane/devices/default_qubit_autograd.py (1 change: 1 addition & 0 deletions)
@@ -92,6 +92,7 @@ class DefaultQubitAutograd(DefaultQubit):
    _transpose = staticmethod(np.transpose)
    _tensordot = staticmethod(np.tensordot)
    _conj = staticmethod(np.conj)
+   _real = staticmethod(np.real)
Review comment (PennyLane team member): Minor, but we are in the process of slowly migrating the devices to qml.math :) Not an issue here though!

    _imag = staticmethod(np.imag)
    _roll = staticmethod(np.roll)
    _stack = staticmethod(np.stack)
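
Each interface device binds its framework's functions as staticmethods so that framework-agnostic code in `DefaultQubit`, such as `analytic_probability` above, dispatches to the right backend; this PR registers `_real` next to the existing `_imag` in each subclass. A simplified sketch of the pattern (illustrative class names, not the actual hierarchy):

    import numpy as np
    import jax.numpy as jnp

    class BaseQubitDevice:
        # Default (NumPy) bindings; interface subclasses override them.
        _flatten = staticmethod(np.ravel)
        _real = staticmethod(np.real)
        _imag = staticmethod(np.imag)

        def probabilities(self, state):
            # Framework-agnostic: dispatches through the staticmethods above.
            flat = self._flatten(state)
            return self._real(flat) ** 2 + self._imag(flat) ** 2

    class JaxQubitDevice(BaseQubitDevice):
        _flatten = staticmethod(jnp.ravel)
        _real = staticmethod(jnp.real)
        _imag = staticmethod(jnp.imag)

    print(JaxQubitDevice().probabilities(jnp.array([1.0 + 0j, 0.0])))  # [1. 0.]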
pennylane/devices/default_qubit_jax.py (1 change: 1 addition & 0 deletions)
@@ -148,6 +148,7 @@ def circuit():
        )
    )
    _conj = staticmethod(jnp.conj)
+   _real = staticmethod(jnp.real)
    _imag = staticmethod(jnp.imag)
    _roll = staticmethod(jnp.roll)
    _stack = staticmethod(jnp.stack)
pennylane/devices/default_qubit_tf.py (1 change: 1 addition & 0 deletions)
@@ -140,6 +140,7 @@ class DefaultQubitTF(DefaultQubit):
    _transpose = staticmethod(tf.transpose)
    _tensordot = staticmethod(tf.tensordot)
    _conj = staticmethod(tf.math.conj)
+   _real = staticmethod(tf.math.real)
    _imag = staticmethod(tf.math.imag)
    _roll = staticmethod(tf.roll)
    _stack = staticmethod(tf.stack)
pennylane/devices/default_qubit_torch.py (1 change: 1 addition & 0 deletions)
@@ -150,6 +150,7 @@ def circuit(x):
    _transpose = staticmethod(lambda a, axes=None: a.permute(*axes))
    _asnumpy = staticmethod(lambda x: x.cpu().numpy())
    _conj = staticmethod(torch.conj)
+   _real = staticmethod(torch.real)
    _imag = staticmethod(torch.imag)
    _norm = staticmethod(torch.norm)
    _flatten = staticmethod(torch.flatten)
tests/devices/test_default_qubit_autograd.py (16 changes: 16 additions & 0 deletions)
@@ -280,6 +280,22 @@ def circuit(a, b):
        )
        assert np.allclose(res, expected_grad, atol=tol, rtol=0)

+    @pytest.mark.parametrize("x, shift", [(0.0, 0.0), (0.5, -0.5)])
+    def test_hessian_at_zero(self, x, shift):
+        """Tests that the Hessian at vanishing state vector amplitudes
+        is correct."""
+        dev = qml.device("default.qubit.autograd", wires=1)
+
+        @qml.qnode(dev, interface="autograd", diff_method="backprop")
+        def circuit(x):
+            qml.RY(shift, wires=0)
+            qml.RY(x, wires=0)
+            return qml.expval(qml.PauliZ(0))
+
+        assert qml.math.isclose(qml.jacobian(circuit)(x), 0.0)
+        assert qml.math.isclose(qml.jacobian(qml.jacobian(circuit))(x), -1.0)
+        assert qml.math.isclose(qml.grad(qml.grad(circuit))(x), -1.0)
+
    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift", "finite-diff"])
    def test_autograd_interface_gradient(self, operation, diff_method, tol):
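
For reference, both parameter sets place the circuit at shift + x = 0, where the |1⟩ amplitude sin((shift + x)/2) vanishes; this is precisely the regime the old abs-based probability differentiated incorrectly. The expected values follow from a one-line derivation:

    |ψ(x)⟩ = cos((shift + x)/2) |0⟩ + sin((shift + x)/2) |1⟩
    ⟨Z⟩(x) = cos(shift + x)
    d⟨Z⟩/dx   = -sin(shift + x) =  0  at shift + x = 0
    d²⟨Z⟩/dx² = -cos(shift + x) = -1  at shift + x = 0

The same expected values apply to the JAX, TF, and Torch variants below.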
tests/devices/test_default_qubit_jax.py (16 changes: 16 additions & 0 deletions)
@@ -543,6 +543,22 @@ def circuit(a, b):

        assert jnp.allclose(jnp.array(res), jnp.array(expected_grad), atol=tol, rtol=0)

+    @pytest.mark.parametrize("x, shift", [(0.0, 0.0), (0.5, -0.5)])
+    def test_hessian_at_zero(self, x, shift):
+        """Tests that the Hessian at vanishing state vector amplitudes
+        is correct."""
+        dev = qml.device("default.qubit.jax", wires=1)
+
+        @qml.qnode(dev, interface="jax", diff_method="backprop")
+        def circuit(x):
+            qml.RY(shift, wires=0)
+            qml.RY(x, wires=0)
+            return qml.expval(qml.PauliZ(0))
+
+        assert qml.math.isclose(jax.grad(circuit)(x), 0.0)
+        assert qml.math.isclose(jax.jacobian(jax.jacobian(circuit))(x), -1.0)
+        assert qml.math.isclose(jax.grad(jax.grad(circuit))(x), -1.0)
+
    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop"])
    def test_jax_interface_gradient(self, operation, diff_method, tol):
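
The JAX variant checks the second derivative two ways: `jax.grad` composes scalar derivatives, while `jax.jacobian` generalizes to non-scalar outputs; for this scalar QNode the two compositions must agree. The same idiom on a plain function (a standalone sketch, not repository code):

    import jax
    import jax.numpy as jnp

    f = lambda x: jnp.cos(x)  # stands in for the QNode

    print(jax.grad(f)(0.0))                    # -sin(0) =  0.0
    print(jax.grad(jax.grad(f))(0.0))          # -cos(0) = -1.0
    print(jax.jacobian(jax.jacobian(f))(0.0))  # same second derivative via jacobian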
tests/devices/test_default_qubit_tf.py (27 changes: 27 additions & 0 deletions)
@@ -1323,6 +1323,33 @@ def circuit(a, b):
        res = tape.gradient(res, [a_tf, b_tf])
        assert np.allclose(res, expected_grad, atol=tol, rtol=0)

+    @pytest.mark.parametrize("x, shift", [(0.0, 0.0), (0.5, -0.5)])
+    def test_hessian_at_zero(self, x, shift):
+        """Tests that the Hessian at vanishing state vector amplitudes
+        is correct."""
+        dev = qml.device("default.qubit.tf", wires=1)
+
+        shift = tf.constant(shift)
+        x = tf.Variable(x)
+
+        @qml.qnode(dev, interface="tf", diff_method="backprop")
+        def circuit(x):
+            qml.RY(shift, wires=0)
+            qml.RY(x, wires=0)
+            return qml.expval(qml.PauliZ(0))
+
+        with tf.GradientTape(persistent=True) as t2:
+            with tf.GradientTape(persistent=True) as t1:
+                value = circuit(x)
+            grad = t1.gradient(value, x)
+            jac = t1.jacobian(value, x)
+        hess_grad = t2.gradient(grad, x)
+        hess_jac = t2.jacobian(jac, x)
+
+        assert qml.math.isclose(grad, 0.0)
+        assert qml.math.isclose(hess_grad, -1.0)
+        assert qml.math.isclose(hess_jac, -1.0)
+
    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift", "finite-diff"])
    def test_tf_interface_gradient(self, operation, diff_method, tol):
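
The TF test needs nested tapes because TensorFlow records computations rather than composing derivative transforms: the outer tape must watch the inner tape's `gradient`/`jacobian` call, and `persistent=True` lets `t1` be queried twice. The same idiom on a plain function (a standalone sketch, not repository code):

    import tensorflow as tf

    x = tf.Variable(0.0)

    with tf.GradientTape() as outer:
        with tf.GradientTape() as inner:
            y = tf.cos(x)          # stands in for the QNode evaluation
        dy = inner.gradient(y, x)  # first derivative, recorded by the outer tape
    d2y = outer.gradient(dy, x)    # second derivative: -cos(0) = -1

    print(float(dy), float(d2y))   # expected: 0.0 -1.0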
tests/devices/test_default_qubit_torch.py (20 changes: 20 additions & 0 deletions)
@@ -1425,6 +1425,26 @@ def circuit(a, b):
        assert torch.allclose(a.grad, -0.5 * torch.sin(a) * (torch.cos(b) + 1), atol=tol, rtol=0)
        assert torch.allclose(b.grad, 0.5 * torch.sin(b) * (1 - torch.cos(a)))

+    @pytest.mark.parametrize("x, shift", [(0.0, 0.0), (0.5, -0.5)])
+    def test_hessian_at_zero(self, torch_device, x, shift):
+        """Tests that the Hessian at vanishing state vector amplitudes
+        is correct."""
+        dev = qml.device("default.qubit.torch", wires=1, torch_device=torch_device)
+
+        x = torch.tensor(x, requires_grad=True)
+
+        @qml.qnode(dev, interface="torch", diff_method="backprop")
+        def circuit(x):
+            qml.RY(shift, wires=0)
+            qml.RY(x, wires=0)
+            return qml.expval(qml.PauliZ(0))
+
+        grad = torch.autograd.functional.jacobian(circuit, x)
+        hess = torch.autograd.functional.hessian(circuit, x)
+
+        assert qml.math.isclose(grad, torch.tensor(0.0))
+        assert qml.math.isclose(hess, torch.tensor(-1.0))
+
    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift", "finite-diff"])
    def test_torch_interface_gradient(self, torch_device, operation, diff_method, tol):
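
Unlike the TF test, the Torch variant uses `torch.autograd.functional`, which differentiates a callable directly instead of recording tapes or composing grad transforms. The same idiom on a plain function (a standalone sketch, not repository code):

    import torch
    from torch.autograd.functional import hessian, jacobian

    f = lambda x: torch.cos(x)  # stands in for the QNode
    x = torch.tensor(0.0)

    print(jacobian(f, x))  # -sin(0) =  0
    print(hessian(f, x))   # -cos(0) = -1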