From a5e27469f495b00a1d2a174525c77451ed2c1d46 Mon Sep 17 00:00:00 2001 From: dwierichs Date: Mon, 17 Jan 2022 23:42:53 +0100 Subject: [PATCH 01/10] use multi_dispatch decorator across math module --- pennylane/math/multi_dispatch.py | 96 ++++++++++++++++---------------- 1 file changed, 47 insertions(+), 49 deletions(-) diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py index a61288dbd2a..6d16f25c573 100644 --- a/pennylane/math/multi_dispatch.py +++ b/pennylane/math/multi_dispatch.py @@ -98,7 +98,7 @@ def multi_dispatch(argnum=None, tensor_list=None): Args: - argnum (list[int]): A list of integers indicating indicating the indices + argnum (list[int]): A list of integers indicating the indices to dispatch (i.e., the arguments that are tensors handled by an interface). If ``None``, dispatch over all arguments. tensor_lists (list[int]): a list of integers indicating which indices @@ -126,12 +126,12 @@ def multi_dispatch(argnum=None, tensor_list=None): >>> stack = multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack) We can also use the ``multi_dispatch`` decorator to dispatch - arguments of more more elaborate custom functions. Here is an example + arguments of more elaborate custom functions. Here is an example of a ``custom_function`` that computes :math:`c \\sum_i (v_i)^T v_i`, where :math:`v_i` are vectors in ``values`` and - :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first argument ``values``, - how ``tensor_list=0`` indicates that said first argument is a list of vectors, and that ``coefficient`` is not - dispatched. + :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first + argument ``values``, how ``tensor_list=0`` indicates that said first argument is a + list of vectors, and that ``coefficient`` is not dispatched. >>> @math.multi_dispatch(argnum=0, tensor_list=0) >>> def custom_function(values, like, coefficient=10): @@ -178,8 +178,8 @@ def wrapper(*args, **kwargs): return decorator - -def block_diag(values): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def block_diag(values, like): """Combine a sequence of 2D tensors to form a block diagonal tensor. Args: @@ -203,12 +203,12 @@ def block_diag(values): [ 0, 0, -1, -6, -3, 0], [ 0, 0, 0, 0, 0, 5]]) """ - interface = _multi_dispatch(values) - values = np.coerce(values, like=interface) - return np.block_diag(values, like=interface) + values = np.coerce(values, like=like) + return np.block_diag(values, like=like) -def concatenate(values, axis=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def concatenate(values, like, axis=0): """Concatenate a sequence of tensors along the specified axis. .. warning:: @@ -235,9 +235,7 @@ def concatenate(values, axis=0): """ - interface = _multi_dispatch(values) - - if interface == "torch": + if like == "torch": import torch if axis is None: @@ -248,16 +246,17 @@ def concatenate(values, axis=0): else: values = [torch.as_tensor(t) for t in values] - if interface == "tensorflow" and axis is None: + if like == "tensorflow" and axis is None: # flatten and then concatenate zero'th dimension # to reproduce numpy's behaviour values = [np.flatten(np.array(t)) for t in values] axis = 0 - return np.concatenate(values, axis=axis, like=interface) + return np.concatenate(values, axis=axis, like=like) -def diag(values, k=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def diag(values, like, k=0): """Construct a diagonal tensor from a list of scalars. 
Args: @@ -291,15 +290,14 @@ def diag(values, k=0): [0.0000, 0.0000, 0.2000], [0.0000, 0.0000, 0.0000]]) """ - interface = _multi_dispatch(values) - if isinstance(values, (list, tuple)): - values = np.stack(np.coerce(values, like=interface), like=interface) + values = np.stack(np.coerce(values, like=like), like=like) - return np.diag(values, k=k, like=interface) + return np.diag(values, k=k, like=like) -def dot(tensor1, tensor2): +@multi_dispatch(argnum=[0, 1]) +def dot(tensor1, tensor2, like): """Returns the matrix or dot product of two tensors. * If both tensors are 0-dimensional, elementwise multiplication @@ -323,34 +321,34 @@ def dot(tensor1, tensor2): Returns: tensor_like: the matrix or dot product of two tensors """ - interface = _multi_dispatch([tensor1, tensor2]) - x, y = np.coerce([tensor1, tensor2], like=interface) + x, y = np.coerce([tensor1, tensor2], like=like) - if interface == "torch": + if like == "torch": if x.ndim == 0 and y.ndim == 0: return x * y if x.ndim <= 2 and y.ndim <= 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) + return np.tensordot(x, y, axes=[[-1], [-2]], like=like) - if interface == "tensorflow": + if like == "tensorflow": if len(np.shape(x)) == 0 and len(np.shape(y)) == 0: return x * y if len(np.shape(y)) == 1: - return np.tensordot(x, y, axes=[[-1], [0]], like=interface) + return np.tensordot(x, y, axes=[[-1], [0]], like=like) if len(np.shape(x)) == 2 and len(np.shape(y)) == 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) + return np.tensordot(x, y, axes=[[-1], [-2]], like=like) - return np.dot(x, y, like=interface) + return np.dot(x, y, like=like) -def tensordot(tensor1, tensor2, axes=None): +@multi_dispatch(argnum=[0, 1]) +def tensordot(tensor1, tensor2, like, axes=None): """Returns the tensor product of two tensors. In general ``axes`` specifies either the set of axes for both tensors that are contracted (with the first/second entry of ``axes`` @@ -376,11 +374,12 @@ def tensordot(tensor1, tensor2, axes=None): Returns: tensor_like: the tensor product of the two input tensors """ - interface = _multi_dispatch([tensor1, tensor2]) - return np.tensordot(tensor1, tensor2, axes=axes, like=interface) + x, y = np.coerce([tensor1, tensor2], like=like) + return np.tensordot(x, y, axes=axes, like=like) -def get_trainable_indices(values): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def get_trainable_indices(values, like): """Returns a set containing the trainable indices of a sequence of values. @@ -403,10 +402,9 @@ def get_trainable_indices(values): tensor(0.0899685, requires_grad=True) """ trainable = requires_grad - interface = _multi_dispatch(values) trainable_params = set() - if interface == "jax": + if like == "jax": import jax if not any(isinstance(v, jax.core.Tracer) for v in values): @@ -420,7 +418,7 @@ def get_trainable_indices(values): trainable = requires_grad for idx, p in enumerate(values): - if trainable(p, interface=interface): + if trainable(p, interface=like): trainable_params.add(idx) return trainable_params @@ -459,7 +457,8 @@ def ones_like(tensor, dtype=None): return np.ones_like(tensor) -def safe_squeeze(tensor, axis=None, exclude_axis=None): +@multi_dispatch(argnum=[0]) +def safe_squeeze(tensor, like, axis=None, exclude_axis=None): """Squeeze a tensor either along all axes, specified axes or all but a set of excluded axes. For selective squeezing, catch errors and do nothing if the selected axes do not have size 1. 
@@ -474,8 +473,7 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None): or not excluded and that have size 1. If no axes are specified or excluded, all axes are attempted to be squeezed. """ - interface = _multi_dispatch([tensor]) - if interface == "tensorflow": + if like == "tensorflow": from tensorflow.python.framework.errors_impl import InvalidArgumentError exception = InvalidArgumentError @@ -508,7 +506,8 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None): return tensor -def stack(values, axis=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def stack(values, like, axis=0): """Stack a sequence of tensors along the specified axis. .. warning:: @@ -537,9 +536,8 @@ def stack(values, axis=0): [1.00e-01, 2.00e-01, 3.00e-01], [5.00e+00, 8.00e+00, 1.01e+02]], dtype=float32)> """ - interface = _multi_dispatch(values) - values = np.coerce(values, like=interface) - return np.stack(values, axis=axis, like=interface) + values = np.coerce(values, like=like) + return np.stack(values, axis=axis, like=like) def where(condition, x=None, y=None): @@ -612,7 +610,8 @@ def where(condition, x=None, y=None): return np.where(condition, x, y, like=_multi_dispatch([condition, x, y])) -def frobenius_inner_product(A, B, normalize=False): +@multi_dispatch(argnum=[0, 1]) +def frobenius_inner_product(A, B, like, normalize=False): r"""Frobenius inner product between two matrices. .. math:: @@ -637,8 +636,7 @@ def frobenius_inner_product(A, B, normalize=False): >>> qml.math.frobenius_inner_product(A, B) 3.091948202943376 """ - interface = _multi_dispatch([A, B]) - A, B = np.coerce([A, B], like=interface) + A, B = np.coerce([A, B], like=like) inner_product = np.sum(A * B) @@ -649,6 +647,7 @@ def frobenius_inner_product(A, B, normalize=False): return inner_product +@multi_dispatch(argnum=[0, 2]) def scatter_element_add(tensor, index, value, like=None): """In-place addition of a multidimensional value over various indices of a tensor. @@ -682,8 +681,7 @@ def scatter_element_add(tensor, index, value, like=None): if len(np.shape(tensor)) == 0 and index == (): return tensor + value - interface = like or _multi_dispatch([tensor, value]) - return np.scatter_element_add(tensor, index, value, like=interface) + return np.scatter_element_add(tensor, index, value, like=like) def unwrap(values, max_depth=None): From 308ddef93e9c40d7ddfd5c051506a16cd6fcc7e3 Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 10:22:31 +0100 Subject: [PATCH 02/10] tests --- tests/math/test_functions.py | 85 +++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/tests/math/test_functions.py b/tests/math/test_functions.py index 2be238b7f2b..e3227536c25 100644 --- a/tests/math/test_functions.py +++ b/tests/math/test_functions.py @@ -13,6 +13,7 @@ # limitations under the License. """Unit tests for the TensorBox functional API in pennylane.fn.fn """ +from functools import partial import itertools import numpy as onp import pytest @@ -546,7 +547,9 @@ def test_multidimensional_product(self, t1, t2): class TestTensordotTorch: - """Tests for the tensor product function in torch.""" + """Tests for the tensor product function in torch. 
+ This test is required because the functionality of tensordot for Torch + is being patched in PennyLane, as compared to autoray.""" v1 = torch.tensor([0.1, 0.5, -0.9, 1.0, -4.2, 0.1], dtype=torch.float64) v2 = torch.tensor([4.3, -1.2, 8.2, 0.6, -4.2, -11.0], dtype=torch.float64) @@ -761,6 +764,86 @@ def test_tensordot_torch_tensor_vector(self, v, expected, axes): def test_tensordot_torch_tensor_matrix(self, M, expected, axes1, axes2): assert fn.allclose(fn.tensordot(self.T1, M, axes=[axes1, axes2]), expected) +class TestTensordotDifferentiability: + + v0 = np.array([0.1, 5.3, -0.9, 1.1]) + v1 = np.array([0.5, -1.7, -2.9, 0.0]) + v2 = np.array([-0.4, 9.1, 1.6]) + exp_shapes = ((len(v0), len(v2), len(v0)), (len(v0), len(v2), len(v2))) + exp_jacs = (np.zeros(exp_shapes[0]), np.zeros(exp_shapes[1])) + for i in range(len(v0)): + exp_jacs[0][i, : , i] = v2 + for i in range(len(v2)): + exp_jacs[1][:, i , i] = v0 + + def test_autograd(self): + """Tests differentiability of tensordot with Autograd.""" + v0 = np.array(self.v0, requires_grad=True) + v1 = np.array(self.v1, requires_grad=True) + v2 = np.array(self.v2, requires_grad=True) + + # Test inner product + jac = qml.jacobian(partial(fn.tensordot, axes=[0, 0]), argnum=(0, 1))(v0, v1) + assert all(fn.allclose(jac[i], _v) for i, _v in enumerate([v1, v0])) + + # Test outer product + jac = qml.jacobian(partial(fn.tensordot, axes=0), argnum=(0, 1))(v0, v2) + assert all(fn.shape(jac[i]) == self.exp_shapes[i] for i in [0, 1]) + assert all(fn.allclose(jac[i], self.exp_jacs[i]) for i in [0, 1]) + + def test_torch(self): + """Tests differentiability of tensordot with Torch.""" + jac_fn = torch.autograd.functional.jacobian + + v0 = torch.tensor(self.v0, requires_grad=True, dtype=torch.float64) + v1 = torch.tensor(self.v1, requires_grad=True, dtype=torch.float64) + v2 = torch.tensor(self.v2, requires_grad=True, dtype=torch.float64) + + # Test inner product + jac = jac_fn(partial(fn.tensordot, axes=[[0], [0]]), (v0, v1)) + assert all(fn.allclose(jac[i], _v) for i, _v in enumerate([v1, v0])) + + # Test outer product + jac = jac_fn(partial(fn.tensordot, axes=0), (v0, v2)) + assert all(fn.shape(jac[i]) == self.exp_shapes[i] for i in [0, 1]) + assert all(fn.allclose(jac[i], self.exp_jacs[i]) for i in [0, 1]) + + def test_jax(self): + """Tests differentiability of tensordot with JAX.""" + jac_fn = jax.jacobian + + v0 = jnp.array(self.v0) + v1 = jnp.array(self.v1) + v2 = jnp.array(self.v2) + + # Test inner product + jac = jac_fn(partial(fn.tensordot, axes=[[0], [0]]), argnums=(0, 1))(v0, v1) + assert all(fn.allclose(jac[i], _v) for i, _v in enumerate([v1, v0])) + + # Test outer product + jac = jac_fn(partial(fn.tensordot, axes=0), argnums=(0, 1))(v0, v2) + assert all(fn.shape(jac[i]) == self.exp_shapes[i] for i in [0, 1]) + assert all(fn.allclose(jac[i], self.exp_jacs[i]) for i in [0, 1]) + + def test_tensorflow(self): + """Tests differentiability of tensordot with TensorFlow.""" + def jac_fn(func, args): + with tf.GradientTape() as tape: + out = func(*args) + return tape.jacobian(out, args) + + v0 = tf.Variable(self.v0, dtype=tf.float64) + v1 = tf.Variable(self.v1, dtype=tf.float64) + v2 = tf.Variable(self.v2, dtype=tf.float64) + + # Test inner product + jac = jac_fn(partial(fn.tensordot, axes=[[0], [0]]), (v0, v1)) + assert all(fn.allclose(jac[i], _v) for i, _v in enumerate([v1, v0])) + + # Test outer product + jac = jac_fn(partial(fn.tensordot, axes=0), (v0, v2)) + assert all(fn.shape(jac[i]) == self.exp_shapes[i] for i in [0, 1]) + assert 
all(fn.allclose(jac[i], self.exp_jacs[i]) for i in [0, 1]) # the following test data is of the form # [original shape, axis to expand, new shape] From 270837d31f8786c6ebd9a4ae94020cf16bcea44b Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 10:39:15 +0100 Subject: [PATCH 03/10] more tests --- pennylane/math/multi_dispatch.py | 17 +++++++++++------ tests/math/test_functions.py | 7 +++++-- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py index 6d16f25c573..a2cdf0f4140 100644 --- a/pennylane/math/multi_dispatch.py +++ b/pennylane/math/multi_dispatch.py @@ -129,8 +129,8 @@ def multi_dispatch(argnum=None, tensor_list=None): arguments of more elaborate custom functions. Here is an example of a ``custom_function`` that computes :math:`c \\sum_i (v_i)^T v_i`, where :math:`v_i` are vectors in ``values`` and - :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first - argument ``values``, how ``tensor_list=0`` indicates that said first argument is a + :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first + argument ``values``, how ``tensor_list=0`` indicates that said first argument is a list of vectors, and that ``coefficient`` is not dispatched. >>> @math.multi_dispatch(argnum=0, tensor_list=0) @@ -178,6 +178,7 @@ def wrapper(*args, **kwargs): return decorator + @multi_dispatch(argnum=[0], tensor_list=[0]) def block_diag(values, like): """Combine a sequence of 2D tensors to form a block diagonal tensor. @@ -255,8 +256,11 @@ def concatenate(values, like, axis=0): return np.concatenate(values, axis=axis, like=like) -@multi_dispatch(argnum=[0], tensor_list=[0]) -def diag(values, like, k=0): +# Note that diag is not eligible for the multi_dispatch decorator because +# it is used sometimes with iterable `values` that need to be interpreted +# as a list of tensors, and sometimes with a single tensor `values` that +# might not be iterable (for example a TensorFlow `Variable`) +def diag(values, k=0): """Construct a diagonal tensor from a list of scalars. 
Args: @@ -290,10 +294,11 @@ def diag(values, like, k=0): [0.0000, 0.0000, 0.2000], [0.0000, 0.0000, 0.0000]]) """ + interface = _multi_dispatch(values) if isinstance(values, (list, tuple)): - values = np.stack(np.coerce(values, like=like), like=like) + values = np.stack(np.coerce(values, like=interface), like=interface) - return np.diag(values, k=k, like=like) + return np.diag(values, k=k, like=interface) @multi_dispatch(argnum=[0, 1]) diff --git a/tests/math/test_functions.py b/tests/math/test_functions.py index e3227536c25..e94d65b6e9f 100644 --- a/tests/math/test_functions.py +++ b/tests/math/test_functions.py @@ -764,6 +764,7 @@ def test_tensordot_torch_tensor_vector(self, v, expected, axes): def test_tensordot_torch_tensor_matrix(self, M, expected, axes1, axes2): assert fn.allclose(fn.tensordot(self.T1, M, axes=[axes1, axes2]), expected) + class TestTensordotDifferentiability: v0 = np.array([0.1, 5.3, -0.9, 1.1]) @@ -772,9 +773,9 @@ class TestTensordotDifferentiability: exp_shapes = ((len(v0), len(v2), len(v0)), (len(v0), len(v2), len(v2))) exp_jacs = (np.zeros(exp_shapes[0]), np.zeros(exp_shapes[1])) for i in range(len(v0)): - exp_jacs[0][i, : , i] = v2 + exp_jacs[0][i, :, i] = v2 for i in range(len(v2)): - exp_jacs[1][:, i , i] = v0 + exp_jacs[1][:, i, i] = v0 def test_autograd(self): """Tests differentiability of tensordot with Autograd.""" @@ -827,6 +828,7 @@ def test_jax(self): def test_tensorflow(self): """Tests differentiability of tensordot with TensorFlow.""" + def jac_fn(func, args): with tf.GradientTape() as tape: out = func(*args) @@ -845,6 +847,7 @@ def jac_fn(func, args): assert all(fn.shape(jac[i]) == self.exp_shapes[i] for i in [0, 1]) assert all(fn.allclose(jac[i], self.exp_jacs[i]) for i in [0, 1]) + # the following test data is of the form # [original shape, axis to expand, new shape] expand_dims_test_data = [ From fe6b0278948d0000813a1acc3db58d5fbb4bf379 Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 11:14:52 +0100 Subject: [PATCH 04/10] changelog --- doc/releases/changelog-dev.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 94016a23a99..82f20eeb7b7 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -323,10 +323,13 @@ [(#2063)](https://github.com/PennyLaneAI/pennylane/pull/2063) * Added a new `multi_dispatch` decorator that helps ease the definition of new functions - inside PennyLane. We can decorate the function, indicating the arguments that are - tensors handled by the interface: + inside PennyLane. The decorator is used throughout the math module, demonstrating use cases. [(#2082)](https://github.com/PennyLaneAI/pennylane/pull/2084) - + [(#2096)](https://github.com/PennyLaneAI/pennylane/pull/2096) + + We can decorate a function, indicating the arguments that are + tensors handled by the interface: + ```pycon >>> @qml.math.multi_dispatch(argnum=[0, 1]) ... 
def some_function(tensor1, tensor2, option, like): From ea4296b12260418a5279303a5abcd68b1da5907f Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 11:19:06 +0100 Subject: [PATCH 05/10] filename type --- tests/math/test_multi_disptach.py | 109 ------------------------------ 1 file changed, 109 deletions(-) delete mode 100644 tests/math/test_multi_disptach.py diff --git a/tests/math/test_multi_disptach.py b/tests/math/test_multi_disptach.py deleted file mode 100644 index 406b68fb094..00000000000 --- a/tests/math/test_multi_disptach.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2018-2020 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Assertion test for multi_dispatch function/decorator -""" -import autoray -import numpy as onp -import pytest -from autoray import numpy as anp -from pennylane import numpy as np -from pennylane import math as fn - - -tf = pytest.importorskip("tensorflow", minversion="2.1") -torch = pytest.importorskip("torch") -jax = pytest.importorskip("jax") - -test_multi_dispatch_stack_data = [ - [[1.0, 0.0], [2.0, 3.0]], - ([1.0, 0.0], [2.0, 3.0]), - onp.array([[1.0, 0.0], [2.0, 3.0]]), - anp.array([[1.0, 0.0], [2.0, 3.0]]), - np.array([[1.0, 0.0], [2.0, 3.0]]), - jax.numpy.array([[1.0, 0.0], [2.0, 3.0]]), - tf.constant([[1.0, 0.0], [2.0, 3.0]]), -] - - -@pytest.mark.parametrize("x", test_multi_dispatch_stack_data) -def test_multi_dispatch_stack(x): - """Test that the decorated autoray function stack can handle all inputs""" - stack = fn.multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack) - res = stack(x) - assert fn.allequal(res, [[1.0, 0.0], [2.0, 3.0]]) - - -@pytest.mark.parametrize("x", test_multi_dispatch_stack_data) -def test_multi_dispatch_decorate(x): - """Test decorating a standard numpy function for PennyLane""" - - @fn.multi_dispatch(argnum=[0], tensor_list=[0]) - def tensordot(x, like, axes=None): - return np.tensordot(x[0], x[1], axes=axes) - - assert fn.allequal(tensordot(x, axes=(0, 0)).numpy(), 2) - - -test_data0 = [ - (1, 2, 3), - [1, 2, 3], - onp.array([1, 2, 3]), - anp.array([1, 2, 3]), - np.array([1, 2, 3]), - torch.tensor([1, 2, 3]), - jax.numpy.array([1, 2, 3]), - tf.constant([1, 2, 3]), -] - -test_data = [(x, x) for x in test_data0] - - -@pytest.mark.parametrize("t1,t2", test_data) -def test_multi_dispatch_decorate_argnum_none(t1, t2): - """Test decorating a standard numpy function for PennyLane, automatically dispatching all inputs by choosing argnum=None""" - - @fn.multi_dispatch(argnum=None, tensor_list=None) - def tensordot(tensor1, tensor2, like, axes=None): - return np.tensordot(tensor1, tensor2, axes=axes) - - assert fn.allequal(tensordot(t1, t2, axes=(0, 0)).numpy(), 14) - - -test_data_values = [ - [[1, 2, 3] for _ in range(5)], - [(1, 2, 3) for _ in range(5)], - [np.array([1, 2, 3]) for _ in range(5)], - [onp.array([1, 2, 3]) for _ in range(5)], - [anp.array([1, 2, 3]) for _ in range(5)], - [torch.tensor([1, 2, 3]) for _ in range(5)], - [jax.numpy.array([1, 2, 3]) for _ in range(5)], - 
[tf.constant([1, 2, 3]) for _ in range(5)], -] - - -@pytest.mark.parametrize("values", test_data_values) -def test_multi_dispatch_decorate_non_dispatch(values): - """Test decorating a custom function for PennyLane including a non-dispatchable parameter""" - - @fn.multi_dispatch(argnum=0, tensor_list=0) - def custom_function(values, like, coefficient=10): - """ - A dummy custom function that computes coeff :math:`c \\sum_i (v_i)^T v_i` where :math:`v_i` are vectors in ``values`` - and :math:`c` is a fixed ``coefficient``. - values is a list of vectors - like can force the interface (optional) - """ - return coefficient * np.sum([fn.dot(v, v) for v in values]) - - assert fn.allequal(custom_function(values), 700) From f02fc563f8b883c4a0f12eaeb334ff82042760aa Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 11:19:21 +0100 Subject: [PATCH 06/10] filename typo --- tests/math/test_multi_dispatch.py | 109 ++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 tests/math/test_multi_dispatch.py diff --git a/tests/math/test_multi_dispatch.py b/tests/math/test_multi_dispatch.py new file mode 100644 index 00000000000..406b68fb094 --- /dev/null +++ b/tests/math/test_multi_dispatch.py @@ -0,0 +1,109 @@ +# Copyright 2018-2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Assertion test for multi_dispatch function/decorator +""" +import autoray +import numpy as onp +import pytest +from autoray import numpy as anp +from pennylane import numpy as np +from pennylane import math as fn + + +tf = pytest.importorskip("tensorflow", minversion="2.1") +torch = pytest.importorskip("torch") +jax = pytest.importorskip("jax") + +test_multi_dispatch_stack_data = [ + [[1.0, 0.0], [2.0, 3.0]], + ([1.0, 0.0], [2.0, 3.0]), + onp.array([[1.0, 0.0], [2.0, 3.0]]), + anp.array([[1.0, 0.0], [2.0, 3.0]]), + np.array([[1.0, 0.0], [2.0, 3.0]]), + jax.numpy.array([[1.0, 0.0], [2.0, 3.0]]), + tf.constant([[1.0, 0.0], [2.0, 3.0]]), +] + + +@pytest.mark.parametrize("x", test_multi_dispatch_stack_data) +def test_multi_dispatch_stack(x): + """Test that the decorated autoray function stack can handle all inputs""" + stack = fn.multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack) + res = stack(x) + assert fn.allequal(res, [[1.0, 0.0], [2.0, 3.0]]) + + +@pytest.mark.parametrize("x", test_multi_dispatch_stack_data) +def test_multi_dispatch_decorate(x): + """Test decorating a standard numpy function for PennyLane""" + + @fn.multi_dispatch(argnum=[0], tensor_list=[0]) + def tensordot(x, like, axes=None): + return np.tensordot(x[0], x[1], axes=axes) + + assert fn.allequal(tensordot(x, axes=(0, 0)).numpy(), 2) + + +test_data0 = [ + (1, 2, 3), + [1, 2, 3], + onp.array([1, 2, 3]), + anp.array([1, 2, 3]), + np.array([1, 2, 3]), + torch.tensor([1, 2, 3]), + jax.numpy.array([1, 2, 3]), + tf.constant([1, 2, 3]), +] + +test_data = [(x, x) for x in test_data0] + + +@pytest.mark.parametrize("t1,t2", test_data) +def test_multi_dispatch_decorate_argnum_none(t1, t2): + """Test decorating a standard numpy function for PennyLane, automatically dispatching all inputs by choosing argnum=None""" + + @fn.multi_dispatch(argnum=None, tensor_list=None) + def tensordot(tensor1, tensor2, like, axes=None): + return np.tensordot(tensor1, tensor2, axes=axes) + + assert fn.allequal(tensordot(t1, t2, axes=(0, 0)).numpy(), 14) + + +test_data_values = [ + [[1, 2, 3] for _ in range(5)], + [(1, 2, 3) for _ in range(5)], + [np.array([1, 2, 3]) for _ in range(5)], + [onp.array([1, 2, 3]) for _ in range(5)], + [anp.array([1, 2, 3]) for _ in range(5)], + [torch.tensor([1, 2, 3]) for _ in range(5)], + [jax.numpy.array([1, 2, 3]) for _ in range(5)], + [tf.constant([1, 2, 3]) for _ in range(5)], +] + + +@pytest.mark.parametrize("values", test_data_values) +def test_multi_dispatch_decorate_non_dispatch(values): + """Test decorating a custom function for PennyLane including a non-dispatchable parameter""" + + @fn.multi_dispatch(argnum=0, tensor_list=0) + def custom_function(values, like, coefficient=10): + """ + A dummy custom function that computes coeff :math:`c \\sum_i (v_i)^T v_i` where :math:`v_i` are vectors in ``values`` + and :math:`c` is a fixed ``coefficient``. 
+ values is a list of vectors + like can force the interface (optional) + """ + return coefficient * np.sum([fn.dot(v, v) for v in values]) + + assert fn.allequal(custom_function(values), 700) From cd8ac5a9586a4a24d013aaf90e2386bb3e0d989f Mon Sep 17 00:00:00 2001 From: dwierichs Date: Tue, 18 Jan 2022 15:36:39 +0100 Subject: [PATCH 07/10] intermediate --- pennylane/devices/default_qubit.py | 2 +- pennylane/math/multi_dispatch.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pennylane/devices/default_qubit.py b/pennylane/devices/default_qubit.py index 1c8828d8cb3..0de823d56a8 100644 --- a/pennylane/devices/default_qubit.py +++ b/pennylane/devices/default_qubit.py @@ -599,7 +599,7 @@ def density_matrix(self, wires): # Return the full density matrix by using numpy tensor product if wires == self.wires: - density_matrix = self._tensordot(state, self._conj(state), 0) + density_matrix = self._tensordot(state, self._conj(state), axes=0) density_matrix = self._reshape(density_matrix, (2 ** len(wires), 2 ** len(wires))) return density_matrix diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py index a2cdf0f4140..8b1c2ad506a 100644 --- a/pennylane/math/multi_dispatch.py +++ b/pennylane/math/multi_dispatch.py @@ -180,7 +180,7 @@ def wrapper(*args, **kwargs): @multi_dispatch(argnum=[0], tensor_list=[0]) -def block_diag(values, like): +def block_diag(values, like=None): """Combine a sequence of 2D tensors to form a block diagonal tensor. Args: @@ -209,7 +209,7 @@ def block_diag(values, like): @multi_dispatch(argnum=[0], tensor_list=[0]) -def concatenate(values, like, axis=0): +def concatenate(values, axis=0, like=None): """Concatenate a sequence of tensors along the specified axis. .. warning:: @@ -302,7 +302,7 @@ def diag(values, k=0): @multi_dispatch(argnum=[0, 1]) -def dot(tensor1, tensor2, like): +def dot(tensor1, tensor2, like=None): """Returns the matrix or dot product of two tensors. * If both tensors are 0-dimensional, elementwise multiplication @@ -353,7 +353,7 @@ def dot(tensor1, tensor2, like): @multi_dispatch(argnum=[0, 1]) -def tensordot(tensor1, tensor2, like, axes=None): +def tensordot(tensor1, tensor2, axes=None, like=None): """Returns the tensor product of two tensors. In general ``axes`` specifies either the set of axes for both tensors that are contracted (with the first/second entry of ``axes`` @@ -384,7 +384,7 @@ def tensordot(tensor1, tensor2, like, axes=None): @multi_dispatch(argnum=[0], tensor_list=[0]) -def get_trainable_indices(values, like): +def get_trainable_indices(values, like=None): """Returns a set containing the trainable indices of a sequence of values. @@ -463,7 +463,7 @@ def ones_like(tensor, dtype=None): @multi_dispatch(argnum=[0]) -def safe_squeeze(tensor, like, axis=None, exclude_axis=None): +def safe_squeeze(tensor, axis=None, exclude_axis=None, like=None): """Squeeze a tensor either along all axes, specified axes or all but a set of excluded axes. For selective squeezing, catch errors and do nothing if the selected axes do not have size 1. @@ -512,7 +512,7 @@ def safe_squeeze(tensor, like, axis=None, exclude_axis=None): @multi_dispatch(argnum=[0], tensor_list=[0]) -def stack(values, like, axis=0): +def stack(values, axis=0, like=None): """Stack a sequence of tensors along the specified axis. .. 
warning:: @@ -616,7 +616,7 @@ def where(condition, x=None, y=None): @multi_dispatch(argnum=[0, 1]) -def frobenius_inner_product(A, B, like, normalize=False): +def frobenius_inner_product(A, B, normalize=False, like=None): r"""Frobenius inner product between two matrices. .. math:: From 2c7ef26ac264b7065f7d7197f7525bfab2f071e5 Mon Sep 17 00:00:00 2001 From: dwierichs Date: Wed, 19 Jan 2022 00:48:53 +0100 Subject: [PATCH 08/10] tmp check --- pennylane/math/multi_dispatch.py | 92 ++++++++++++++++---------------- tests/math/test_functions.py | 18 ++++--- 2 files changed, 56 insertions(+), 54 deletions(-) diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py index 8b1c2ad506a..376c42fcead 100644 --- a/pennylane/math/multi_dispatch.py +++ b/pennylane/math/multi_dispatch.py @@ -98,7 +98,7 @@ def multi_dispatch(argnum=None, tensor_list=None): Args: - argnum (list[int]): A list of integers indicating the indices + argnum (list[int]): A list of integers indicating indicating the indices to dispatch (i.e., the arguments that are tensors handled by an interface). If ``None``, dispatch over all arguments. tensor_lists (list[int]): a list of integers indicating which indices @@ -126,12 +126,12 @@ def multi_dispatch(argnum=None, tensor_list=None): >>> stack = multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack) We can also use the ``multi_dispatch`` decorator to dispatch - arguments of more elaborate custom functions. Here is an example + arguments of more more elaborate custom functions. Here is an example of a ``custom_function`` that computes :math:`c \\sum_i (v_i)^T v_i`, where :math:`v_i` are vectors in ``values`` and - :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first - argument ``values``, how ``tensor_list=0`` indicates that said first argument is a - list of vectors, and that ``coefficient`` is not dispatched. + :math:`c` is a fixed ``coefficient``. Note how ``argnum=0`` only points to the first argument ``values``, + how ``tensor_list=0`` indicates that said first argument is a list of vectors, and that ``coefficient`` is not + dispatched. >>> @math.multi_dispatch(argnum=0, tensor_list=0) >>> def custom_function(values, like, coefficient=10): @@ -179,8 +179,7 @@ def wrapper(*args, **kwargs): return decorator -@multi_dispatch(argnum=[0], tensor_list=[0]) -def block_diag(values, like=None): +def block_diag(values): """Combine a sequence of 2D tensors to form a block diagonal tensor. Args: @@ -204,12 +203,12 @@ def block_diag(values, like=None): [ 0, 0, -1, -6, -3, 0], [ 0, 0, 0, 0, 0, 5]]) """ - values = np.coerce(values, like=like) - return np.block_diag(values, like=like) + interface = _multi_dispatch(values) + values = np.coerce(values, like=interface) + return np.block_diag(values, like=interface) -@multi_dispatch(argnum=[0], tensor_list=[0]) -def concatenate(values, axis=0, like=None): +def concatenate(values, axis=0): """Concatenate a sequence of tensors along the specified axis. .. 
warning:: @@ -236,7 +235,9 @@ def concatenate(values, axis=0, like=None): """ - if like == "torch": + interface = _multi_dispatch(values) + + if interface == "torch": import torch if axis is None: @@ -247,19 +248,15 @@ def concatenate(values, axis=0, like=None): else: values = [torch.as_tensor(t) for t in values] - if like == "tensorflow" and axis is None: + if interface == "tensorflow" and axis is None: # flatten and then concatenate zero'th dimension # to reproduce numpy's behaviour values = [np.flatten(np.array(t)) for t in values] axis = 0 - return np.concatenate(values, axis=axis, like=like) + return np.concatenate(values, axis=axis, like=interface) -# Note that diag is not eligible for the multi_dispatch decorator because -# it is used sometimes with iterable `values` that need to be interpreted -# as a list of tensors, and sometimes with a single tensor `values` that -# might not be iterable (for example a TensorFlow `Variable`) def diag(values, k=0): """Construct a diagonal tensor from a list of scalars. @@ -295,14 +292,14 @@ def diag(values, k=0): [0.0000, 0.0000, 0.0000]]) """ interface = _multi_dispatch(values) + if isinstance(values, (list, tuple)): values = np.stack(np.coerce(values, like=interface), like=interface) return np.diag(values, k=k, like=interface) -@multi_dispatch(argnum=[0, 1]) -def dot(tensor1, tensor2, like=None): +def dot(tensor1, tensor2): """Returns the matrix or dot product of two tensors. * If both tensors are 0-dimensional, elementwise multiplication @@ -326,34 +323,34 @@ def dot(tensor1, tensor2, like=None): Returns: tensor_like: the matrix or dot product of two tensors """ - x, y = np.coerce([tensor1, tensor2], like=like) + interface = _multi_dispatch([tensor1, tensor2]) + x, y = np.coerce([tensor1, tensor2], like=interface) - if like == "torch": + if interface == "torch": if x.ndim == 0 and y.ndim == 0: return x * y if x.ndim <= 2 and y.ndim <= 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=like) + return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) - if like == "tensorflow": + if interface == "tensorflow": if len(np.shape(x)) == 0 and len(np.shape(y)) == 0: return x * y if len(np.shape(y)) == 1: - return np.tensordot(x, y, axes=[[-1], [0]], like=like) + return np.tensordot(x, y, axes=[[-1], [0]], like=interface) if len(np.shape(x)) == 2 and len(np.shape(y)) == 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=like) + return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) - return np.dot(x, y, like=like) + return np.dot(x, y, like=interface) -@multi_dispatch(argnum=[0, 1]) -def tensordot(tensor1, tensor2, axes=None, like=None): +def tensordot(tensor1, tensor2, axes=None): """Returns the tensor product of two tensors. In general ``axes`` specifies either the set of axes for both tensors that are contracted (with the first/second entry of ``axes`` @@ -379,12 +376,12 @@ def tensordot(tensor1, tensor2, axes=None, like=None): Returns: tensor_like: the tensor product of the two input tensors """ - x, y = np.coerce([tensor1, tensor2], like=like) - return np.tensordot(x, y, axes=axes, like=like) + interface = _multi_dispatch([tensor1, tensor2]) + tensor1, tensor2 = np.coerce([tensor1, tensor2], like=interface) + return np.tensordot(tensor1, tensor2, axes=axes, like=interface) -@multi_dispatch(argnum=[0], tensor_list=[0]) -def get_trainable_indices(values, like=None): +def get_trainable_indices(values): """Returns a set containing the trainable indices of a sequence of values. 
@@ -407,9 +404,10 @@ def get_trainable_indices(values, like=None): tensor(0.0899685, requires_grad=True) """ trainable = requires_grad + interface = _multi_dispatch(values) trainable_params = set() - if like == "jax": + if interface == "jax": import jax if not any(isinstance(v, jax.core.Tracer) for v in values): @@ -423,7 +421,7 @@ def get_trainable_indices(values, like=None): trainable = requires_grad for idx, p in enumerate(values): - if trainable(p, interface=like): + if trainable(p, interface=interface): trainable_params.add(idx) return trainable_params @@ -462,8 +460,7 @@ def ones_like(tensor, dtype=None): return np.ones_like(tensor) -@multi_dispatch(argnum=[0]) -def safe_squeeze(tensor, axis=None, exclude_axis=None, like=None): +def safe_squeeze(tensor, axis=None, exclude_axis=None): """Squeeze a tensor either along all axes, specified axes or all but a set of excluded axes. For selective squeezing, catch errors and do nothing if the selected axes do not have size 1. @@ -478,7 +475,8 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None, like=None): or not excluded and that have size 1. If no axes are specified or excluded, all axes are attempted to be squeezed. """ - if like == "tensorflow": + interface = _multi_dispatch([tensor]) + if interface == "tensorflow": from tensorflow.python.framework.errors_impl import InvalidArgumentError exception = InvalidArgumentError @@ -511,8 +509,7 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None, like=None): return tensor -@multi_dispatch(argnum=[0], tensor_list=[0]) -def stack(values, axis=0, like=None): +def stack(values, axis=0): """Stack a sequence of tensors along the specified axis. .. warning:: @@ -541,8 +538,9 @@ def stack(values, axis=0, like=None): [1.00e-01, 2.00e-01, 3.00e-01], [5.00e+00, 8.00e+00, 1.01e+02]], dtype=float32)> """ - values = np.coerce(values, like=like) - return np.stack(values, axis=axis, like=like) + interface = _multi_dispatch(values) + values = np.coerce(values, like=interface) + return np.stack(values, axis=axis, like=interface) def where(condition, x=None, y=None): @@ -615,8 +613,7 @@ def where(condition, x=None, y=None): return np.where(condition, x, y, like=_multi_dispatch([condition, x, y])) -@multi_dispatch(argnum=[0, 1]) -def frobenius_inner_product(A, B, normalize=False, like=None): +def frobenius_inner_product(A, B, normalize=False): r"""Frobenius inner product between two matrices. .. math:: @@ -641,7 +638,8 @@ def frobenius_inner_product(A, B, normalize=False, like=None): >>> qml.math.frobenius_inner_product(A, B) 3.091948202943376 """ - A, B = np.coerce([A, B], like=like) + interface = _multi_dispatch([A, B]) + A, B = np.coerce([A, B], like=interface) inner_product = np.sum(A * B) @@ -652,7 +650,6 @@ def frobenius_inner_product(A, B, normalize=False, like=None): return inner_product -@multi_dispatch(argnum=[0, 2]) def scatter_element_add(tensor, index, value, like=None): """In-place addition of a multidimensional value over various indices of a tensor. 
@@ -686,7 +683,8 @@ def scatter_element_add(tensor, index, value, like=None): if len(np.shape(tensor)) == 0 and index == (): return tensor + value - return np.scatter_element_add(tensor, index, value, like=like) + interface = like or _multi_dispatch([tensor, value]) + return np.scatter_element_add(tensor, index, value, like=interface) def unwrap(values, max_depth=None): diff --git a/tests/math/test_functions.py b/tests/math/test_functions.py index e94d65b6e9f..0807d3bc4a7 100644 --- a/tests/math/test_functions.py +++ b/tests/math/test_functions.py @@ -323,7 +323,8 @@ class TestConcatenate: """Tests for the concatenate function""" def test_concatenate_array(self): - """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension""" + """Test that concatenate, called without the axis arguments, + concatenates across the 0th dimension""" t1 = [0.6, 0.1, 0.6] t2 = np.array([0.1, 0.2, 0.3]) t3 = onp.array([5.0, 8.0, 101.0]) @@ -333,7 +334,8 @@ def test_concatenate_array(self): assert np.all(res == np.concatenate([t1, t2, t3])) def test_concatenate_jax(self): - """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension""" + """Test that concatenate, called without the axis arguments, + concatenates across the 0th dimension""" t1 = jnp.array([5.0, 8.0, 101.0]) t2 = jnp.array([0.6, 0.1, 0.6]) t3 = jnp.array([0.1, 0.2, 0.3]) @@ -341,8 +343,9 @@ def test_concatenate_jax(self): res = fn.concatenate([t1, t2, t3]) assert jnp.all(res == jnp.concatenate([t1, t2, t3])) - def test_stack_tensorflow(self): - """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension""" + def test_concatenate_tensorflow(self): + """Test that concatenate, called without the axis arguments, + concatenates across the 0th dimension""" t1 = tf.constant([0.6, 0.1, 0.6]) t2 = tf.Variable([0.1, 0.2, 0.3]) t3 = onp.array([5.0, 8.0, 101.0]) @@ -351,8 +354,9 @@ def test_stack_tensorflow(self): assert isinstance(res, tf.Tensor) assert np.all(res.numpy() == np.concatenate([t1.numpy(), t2.numpy(), t3])) - def test_stack_torch(self): - """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension""" + def test_concatenate_torch(self): + """Test that concatenate, called without the axis arguments, + concatenates across the 0th dimension""" t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64) t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64) t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64) @@ -364,7 +368,7 @@ def test_stack_torch(self): @pytest.mark.parametrize( "t1", [onp.array([[1], [2]]), torch.tensor([[1], [2]]), tf.constant([[1], [2]])] ) - def test_stack_axis(self, t1): + def test_concatenate_axis(self, t1): """Test that passing the axis argument allows for concatenating along a different axis""" t2 = onp.array([[3], [4]]) From 12abf980aaa20c111ceccb22d0c0b58858e32fe5 Mon Sep 17 00:00:00 2001 From: dwierichs Date: Wed, 19 Jan 2022 01:43:31 +0100 Subject: [PATCH 09/10] reduce warnings across math module test suite --- pennylane/math/is_independent.py | 2 +- tests/math/test_basic_math.py | 2 +- tests/math/test_functions.py | 28 ++++++++++++++++------------ tests/math/test_is_independent.py | 8 ++++---- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/pennylane/math/is_independent.py b/pennylane/math/is_independent.py index 5a2bea1e67e..302b5bb7792 100644 --- a/pennylane/math/is_independent.py +++ b/pennylane/math/is_independent.py @@ -203,7 +203,7 @@ def 
_get_random_args(args, interface, num, seed, bounds): if interface == "autograd": # Mark the arguments as trainable with Autograd - rnd_args = pnp.array(rnd_args, requires_grad=True) + rnd_args = [tuple(pnp.array(a, requires_grad=True) for a in arg) for arg in rnd_args] return rnd_args diff --git a/tests/math/test_basic_math.py b/tests/math/test_basic_math.py index 9a098eb9fea..a592749a283 100644 --- a/tests/math/test_basic_math.py +++ b/tests/math/test_basic_math.py @@ -115,7 +115,7 @@ def test_frobenius_inner_product(self, A, B, normalize, expected): def test_frobenius_inner_product_gradient(self): """Test that the calculated gradient is correct.""" - A = np.array([[1.0, 2.3], [-1.3, 2.4]]) + A = onp.array([[1.0, 2.3], [-1.3, 2.4]]) B = torch.autograd.Variable(torch.randn(2, 2).type(torch.float), requires_grad=True) result = fn.frobenius_inner_product(A, B) result.backward() diff --git a/tests/math/test_functions.py b/tests/math/test_functions.py index 0807d3bc4a7..8434d7b7313 100644 --- a/tests/math/test_functions.py +++ b/tests/math/test_functions.py @@ -1452,14 +1452,14 @@ def test_array(self): x = np.array(self.x, requires_grad=True) y = np.array(self.y, requires_grad=True) - def cost(weights): + def cost(*weights): return fn.scatter_element_add(weights[0], self.index, weights[1] ** 2) - res = cost([x, y]) + res = cost(x, y) assert isinstance(res, np.ndarray) assert fn.allclose(res, self.expected_val) - grad = qml.grad(lambda weights: cost(weights)[self.index[0], self.index[1]])([x, y]) + grad = qml.grad(lambda *weights: cost(*weights)[self.index[0], self.index[1]])(x, y) assert fn.allclose(grad[0], self.expected_grad_x) assert fn.allclose(grad[1], self.expected_grad_y) @@ -1582,20 +1582,20 @@ def test_array(self): x = np.array(self.x, requires_grad=True) y = np.array(self.y, requires_grad=True) - def cost(weights): + def cost(*weights): return fn.scatter_element_add( weights[0], self.indices, [fn.sin(weights[1] / 2), weights[1] ** 2] ) - res = cost([x, y]) + res = cost(x, y) assert isinstance(res, np.ndarray) assert fn.allclose(res, self.expected_val) scalar_cost = ( - lambda weights: cost(weights)[self.indices[0][0], self.indices[1][0]] - + cost(weights)[self.indices[0][1], self.indices[1][1]] + lambda *weights: cost(*weights)[self.indices[0][0], self.indices[1][0]] + + cost(*weights)[self.indices[0][1], self.indices[1][1]] ) - grad = qml.grad(scalar_cost)([x, y]) + grad = qml.grad(scalar_cost)(x, y) assert fn.allclose(grad[0], self.expected_grad_x) assert fn.allclose(grad[1], self.expected_grad_y) @@ -1961,7 +1961,7 @@ def test_autograd(self): np.array([[x, 1.2 * y], [x ** 2 - y / 3, -x / y]]), ] f = lambda x, y: fn.block_diag(tensors(x, y)) - x, y = 0.2, 1.5 + x, y = np.array([0.2, 1.5], requires_grad=True) res = qml.jacobian(f)(x, y) exp = self.expected(x, y) assert fn.allclose(res[0], exp[0]) @@ -2119,12 +2119,16 @@ def cost_fn(*params): unwrapped_params = qml.math.unwrap(params) return np.sum(np.sin(params[0] * params[2])) + params[1] - values = [onp.array([0.1, 0.2]), np.tensor(0.1, dtype=np.float64), np.tensor([0.5, 0.2])] - grad = qml.grad(cost_fn)(*values) + values = [ + onp.array([0.1, 0.2]), + np.tensor(0.1, dtype=np.float64, requires_grad=True), + np.tensor([0.5, 0.2], requires_grad=True), + ] + grad = qml.grad(cost_fn, argnum=[1, 2])(*values) expected = [np.array([0.1, 0.2]), 0.1, np.array([0.5, 0.2])] assert all(np.allclose(a, b) for a, b in zip(unwrapped_params, expected)) - assert all(not isinstance(a, ArrayBox) for a in unwrapped_params) + assert not 
any(isinstance(a, ArrayBox) for a in unwrapped_params) def test_autograd_unwrapping_backward_nested(self): """Test that a sequence of Autograd values is properly unwrapped diff --git a/tests/math/test_is_independent.py b/tests/math/test_is_independent.py index 6953b584df8..cc2af6e6a6f 100644 --- a/tests/math/test_is_independent.py +++ b/tests/math/test_is_independent.py @@ -56,8 +56,8 @@ lambda x: x if abs(x) < 1e-5 else 0.0, # x*delta for x=0 is okay lambda x: 1.0 if x > 0 else 0.0, # Heaviside is okay numerically lambda x: 1.0 if x > 0 else 0.0, # Heaviside is okay numerically - lambda x: qml.math.log(1 + qml.math.exp(1000.0 * x)) / 1000.0, # Softplus is okay - lambda x: qml.math.log(1 + qml.math.exp(1000.0 * x)) / 1000.0, # Softplus is okay + lambda x: qml.math.log(1 + qml.math.exp(100.0 * x)) / 100.0, # Softplus is okay + lambda x: qml.math.log(1 + qml.math.exp(100.0 * x)) / 100.0, # Softplus is okay ] args_dependent_lambdas = [ @@ -167,7 +167,7 @@ def dependent_circuit(x, y, z): dependent_circuit, np.array, lambda x: np.array(x * 0.0), - lambda x: (1 + qml.math.tanh(1000 * x)) / 2, + lambda x: (1 + qml.math.tanh(100 * x)) / 2, *dependent_lambdas, ] @@ -519,7 +519,7 @@ def dependent_circuit(x, y, z): dependent_functions = [ dependent_circuit, - torch.tensor, + torch.as_tensor, lambda x: (1 + qml.math.tanh(1000 * x)) / 2, *dependent_lambdas, ] From 3afc8846196f7160bc60a9c96acd0219e9e2f67c Mon Sep 17 00:00:00 2001 From: dwierichs Date: Wed, 19 Jan 2022 10:52:55 +0100 Subject: [PATCH 10/10] include multi_dispatch applications, again. --- pennylane/math/multi_dispatch.py | 92 ++++++++++++++++---------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py index 376c42fcead..4b8a2d220cb 100644 --- a/pennylane/math/multi_dispatch.py +++ b/pennylane/math/multi_dispatch.py @@ -102,8 +102,9 @@ def multi_dispatch(argnum=None, tensor_list=None): to dispatch (i.e., the arguments that are tensors handled by an interface). If ``None``, dispatch over all arguments. tensor_lists (list[int]): a list of integers indicating which indices - in ``argnum`` are expected to be lists of tensors. - If ``None``, this option is ignored. + in ``argnum`` are expected to be lists of tensors. If an argument + marked as tensor list is not a ``tuple`` or ``list``, it is treated + as if it was not marked as tensor list. If ``None``, this option is ignored. Returns: func: A wrapped version of the function, which will automatically attempt @@ -163,7 +164,9 @@ def wrapper(*args, **kwargs): dispatch_args = [] for a in argnums: - if a in tensor_lists: + # Only use extend if the marked argument really + # is a (native) python Sequence + if a in tensor_lists and isinstance(args[a], (list, tuple)): dispatch_args.extend(args[a]) else: dispatch_args.append(args[a]) @@ -179,7 +182,8 @@ def wrapper(*args, **kwargs): return decorator -def block_diag(values): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def block_diag(values, like=None): """Combine a sequence of 2D tensors to form a block diagonal tensor. 
Args: @@ -203,12 +207,12 @@ def block_diag(values): [ 0, 0, -1, -6, -3, 0], [ 0, 0, 0, 0, 0, 5]]) """ - interface = _multi_dispatch(values) - values = np.coerce(values, like=interface) - return np.block_diag(values, like=interface) + values = np.coerce(values, like=like) + return np.block_diag(values, like=like) -def concatenate(values, axis=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def concatenate(values, axis=0, like=None): """Concatenate a sequence of tensors along the specified axis. .. warning:: @@ -235,9 +239,7 @@ def concatenate(values, axis=0): """ - interface = _multi_dispatch(values) - - if interface == "torch": + if like == "torch": import torch if axis is None: @@ -248,16 +250,17 @@ def concatenate(values, axis=0): else: values = [torch.as_tensor(t) for t in values] - if interface == "tensorflow" and axis is None: + if like == "tensorflow" and axis is None: # flatten and then concatenate zero'th dimension # to reproduce numpy's behaviour values = [np.flatten(np.array(t)) for t in values] axis = 0 - return np.concatenate(values, axis=axis, like=interface) + return np.concatenate(values, axis=axis, like=like) -def diag(values, k=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def diag(values, k=0, like=None): """Construct a diagonal tensor from a list of scalars. Args: @@ -291,15 +294,14 @@ def diag(values, k=0): [0.0000, 0.0000, 0.2000], [0.0000, 0.0000, 0.0000]]) """ - interface = _multi_dispatch(values) - if isinstance(values, (list, tuple)): - values = np.stack(np.coerce(values, like=interface), like=interface) + values = np.stack(np.coerce(values, like=like), like=like) - return np.diag(values, k=k, like=interface) + return np.diag(values, k=k, like=like) -def dot(tensor1, tensor2): +@multi_dispatch(argnum=[0, 1]) +def dot(tensor1, tensor2, like=None): """Returns the matrix or dot product of two tensors. * If both tensors are 0-dimensional, elementwise multiplication @@ -323,34 +325,34 @@ def dot(tensor1, tensor2): Returns: tensor_like: the matrix or dot product of two tensors """ - interface = _multi_dispatch([tensor1, tensor2]) - x, y = np.coerce([tensor1, tensor2], like=interface) + x, y = np.coerce([tensor1, tensor2], like=like) - if interface == "torch": + if like == "torch": if x.ndim == 0 and y.ndim == 0: return x * y if x.ndim <= 2 and y.ndim <= 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) + return np.tensordot(x, y, axes=[[-1], [-2]], like=like) - if interface == "tensorflow": + if like == "tensorflow": if len(np.shape(x)) == 0 and len(np.shape(y)) == 0: return x * y if len(np.shape(y)) == 1: - return np.tensordot(x, y, axes=[[-1], [0]], like=interface) + return np.tensordot(x, y, axes=[[-1], [0]], like=like) if len(np.shape(x)) == 2 and len(np.shape(y)) == 2: return x @ y - return np.tensordot(x, y, axes=[[-1], [-2]], like=interface) + return np.tensordot(x, y, axes=[[-1], [-2]], like=like) - return np.dot(x, y, like=interface) + return np.dot(x, y, like=like) -def tensordot(tensor1, tensor2, axes=None): +@multi_dispatch(argnum=[0, 1]) +def tensordot(tensor1, tensor2, axes=None, like=None): """Returns the tensor product of two tensors. 
In general ``axes`` specifies either the set of axes for both tensors that are contracted (with the first/second entry of ``axes`` @@ -376,12 +378,12 @@ def tensordot(tensor1, tensor2, axes=None): Returns: tensor_like: the tensor product of the two input tensors """ - interface = _multi_dispatch([tensor1, tensor2]) - tensor1, tensor2 = np.coerce([tensor1, tensor2], like=interface) - return np.tensordot(tensor1, tensor2, axes=axes, like=interface) + tensor1, tensor2 = np.coerce([tensor1, tensor2], like=like) + return np.tensordot(tensor1, tensor2, axes=axes, like=like) -def get_trainable_indices(values): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def get_trainable_indices(values, like=None): """Returns a set containing the trainable indices of a sequence of values. @@ -404,10 +406,9 @@ def get_trainable_indices(values): tensor(0.0899685, requires_grad=True) """ trainable = requires_grad - interface = _multi_dispatch(values) trainable_params = set() - if interface == "jax": + if like == "jax": import jax if not any(isinstance(v, jax.core.Tracer) for v in values): @@ -421,7 +422,7 @@ def get_trainable_indices(values): trainable = requires_grad for idx, p in enumerate(values): - if trainable(p, interface=interface): + if trainable(p, interface=like): trainable_params.add(idx) return trainable_params @@ -475,8 +476,7 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None): or not excluded and that have size 1. If no axes are specified or excluded, all axes are attempted to be squeezed. """ - interface = _multi_dispatch([tensor]) - if interface == "tensorflow": + if get_interface(tensor) == "tensorflow": from tensorflow.python.framework.errors_impl import InvalidArgumentError exception = InvalidArgumentError @@ -509,7 +509,8 @@ def safe_squeeze(tensor, axis=None, exclude_axis=None): return tensor -def stack(values, axis=0): +@multi_dispatch(argnum=[0], tensor_list=[0]) +def stack(values, axis=0, like=None): """Stack a sequence of tensors along the specified axis. .. warning:: @@ -538,9 +539,8 @@ def stack(values, axis=0): [1.00e-01, 2.00e-01, 3.00e-01], [5.00e+00, 8.00e+00, 1.01e+02]], dtype=float32)> """ - interface = _multi_dispatch(values) - values = np.coerce(values, like=interface) - return np.stack(values, axis=axis, like=interface) + values = np.coerce(values, like=like) + return np.stack(values, axis=axis, like=like) def where(condition, x=None, y=None): @@ -613,7 +613,8 @@ def where(condition, x=None, y=None): return np.where(condition, x, y, like=_multi_dispatch([condition, x, y])) -def frobenius_inner_product(A, B, normalize=False): +@multi_dispatch(argnum=[0, 1]) +def frobenius_inner_product(A, B, normalize=False, like=None): r"""Frobenius inner product between two matrices. .. math:: @@ -638,8 +639,7 @@ def frobenius_inner_product(A, B, normalize=False): >>> qml.math.frobenius_inner_product(A, B) 3.091948202943376 """ - interface = _multi_dispatch([A, B]) - A, B = np.coerce([A, B], like=interface) + A, B = np.coerce([A, B], like=like) inner_product = np.sum(A * B) @@ -650,6 +650,7 @@ def frobenius_inner_product(A, B, normalize=False): return inner_product +@multi_dispatch(argnum=[0, 2]) def scatter_element_add(tensor, index, value, like=None): """In-place addition of a multidimensional value over various indices of a tensor. 
@@ -683,8 +684,7 @@ def scatter_element_add(tensor, index, value, like=None): if len(np.shape(tensor)) == 0 and index == (): return tensor + value - interface = like or _multi_dispatch([tensor, value]) - return np.scatter_element_add(tensor, index, value, like=interface) + return np.scatter_element_add(tensor, index, value, like=like) def unwrap(values, max_depth=None):
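
For readers following the refactor, here is a minimal usage sketch of the decorator, mirroring `test_multi_dispatch_decorate_non_dispatch` above. It assumes PennyLane with this series applied; `like` is the keyword argument the wrapper injects.

```python
from pennylane import numpy as np
from pennylane import math as fn

@fn.multi_dispatch(argnum=0, tensor_list=0)
def custom_function(values, like, coefficient=10):
    # `like` names the interface inferred from the tensors in `values`
    # (argnum=0); tensor_list=0 marks that argument as a list of tensors.
    return coefficient * np.sum([fn.dot(v, v) for v in values])

values = [np.array([1, 2, 3]) for _ in range(5)]
print(custom_function(values))  # 10 * 5 * (1 + 4 + 9) = 700
```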
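
The wrapper logic visible in the hunks above condenses to the sketch below. This is an illustration rather than the in-tree implementation; `_multi_dispatch` is the private interface-inference helper that these patches replace at call sites, imported here from the module being patched.

```python
import functools

from pennylane.math.multi_dispatch import _multi_dispatch  # private helper

def multi_dispatch_sketch(argnum=None, tensor_list=None):
    """Condensed sketch of the decorator used throughout this series."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            argnums = list(range(len(args))) if argnum is None else argnum
            lists = [] if tensor_list is None else tensor_list
            argnums = [argnums] if isinstance(argnums, int) else argnums
            lists = [lists] if isinstance(lists, int) else lists

            dispatch_args = []
            for a in argnums:
                # Arguments marked as tensor lists are unpacked so every
                # member participates in interface inference; the
                # isinstance guard is the PATCH 10 behaviour for marked
                # arguments that are not native Python sequences.
                if a in lists and isinstance(args[a], (list, tuple)):
                    dispatch_args.extend(args[a])
                else:
                    dispatch_args.append(args[a])

            # A caller-supplied `like` keyword forces the interface.
            interface = kwargs.pop("like", None) or _multi_dispatch(dispatch_args)
            return func(*args, like=interface, **kwargs)

        return wrapper

    return decorator
```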
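
One practical payoff of funnelling every function through `np.coerce(..., like=like)` is that mixed-interface inputs are promoted to a single interface before the backend call. A sketch, assuming Torch is installed; the printed dtype follows from coercion promoting both operands to a common float64:

```python
import numpy as onp
import torch

from pennylane import math as fn

x = onp.array([1.0, 2.0, 3.0])     # vanilla NumPy
y = torch.tensor([4.0, 5.0, 6.0])  # Torch

# Dispatch picks "torch"; the NumPy operand is coerced to a Torch tensor
# before autoray's tensordot runs, so the result stays a Torch tensor.
res = fn.tensordot(x, y, axes=[[0], [0]])
print(res)  # tensor(32., dtype=torch.float64)
```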
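
The new `TestTensordotDifferentiability` cases reduce to the identity that the Jacobian of the inner product of `v0` and `v1` with respect to `v0` is `v1`, and vice versa. As a standalone Autograd check, lifted from the tests:

```python
from functools import partial

import pennylane as qml
from pennylane import numpy as np
from pennylane import math as fn

v0 = np.array([0.1, 5.3, -0.9, 1.1], requires_grad=True)
v1 = np.array([0.5, -1.7, -2.9, 0.0], requires_grad=True)

# Jacobian of the inner product with respect to both arguments.
jac = qml.jacobian(partial(fn.tensordot, axes=[0, 0]), argnum=(0, 1))(v0, v1)
assert fn.allclose(jac[0], v1) and fn.allclose(jac[1], v0)
```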
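
The one-line `default_qubit.py` change in PATCH 07 (passing `axes=0` by keyword) guards against the intermediate signatures in this series, where `like` briefly occupied the third positional slot and a bare `0` would have collided with the `like` keyword forwarded by the wrapper. The keyword form is robust regardless of where `like` ends up:

```python
from pennylane import math as fn
from pennylane import numpy as np

state = np.array([1.0, 0.0])

# Keyword form: safe whether the signature is (t1, t2, like, axes=None)
# or the final (t1, t2, axes=None, like=None).
rho = fn.tensordot(state, np.conj(state), axes=0)
print(fn.shape(rho))  # (2, 2)
```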
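
Finally, `scatter_element_add` keeps an explicit `like=None` keyword even after decoration, since the wrapper honours a caller-supplied interface. Minimal usage under the final signature; note that despite the "in-place" wording in the docstring, interfaces with immutable tensors receive an updated copy:

```python
import pennylane as qml
from pennylane import numpy as np

t = np.zeros((2, 2))

# Add 3.0 at index (1, 1); `value` (argnum 2) also takes part in dispatch.
res = qml.math.scatter_element_add(t, (1, 1), 3.0)
print(res)
# [[0. 0.]
#  [0. 3.]]
```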