diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 0970bb66df7..b7b1cb65bba 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -57,12 +57,15 @@ ``` * Support for differentiable execution of batches of circuits has been - added, via the beta `pennylane.batch` module. + added, via the beta `pennylane.interfaces.batch` module. [(#1501)](https://github.com/PennyLaneAI/pennylane/pull/1501) + [(#1508)](https://github.com/PennyLaneAI/pennylane/pull/1508) For example: ```python + from pennylane.interfaces.batch import execute + def cost_fn(x): with qml.tape.JacobianTape() as tape1: qml.RX(x[0], wires=[0]) @@ -76,7 +79,11 @@ qml.CNOT(wires=[0, 1]) qml.probs(wires=1) - result = execute([tape1, tape2], dev, gradient_fn=param_shift) + result = execute( + [tape1, tape2], dev, + gradient_fn=qml.gradients.param_shift, + interface="autograd" + ) return result[0] + result[1][0, 0] res = qml.grad(cost_fn)(params) diff --git a/pennylane/interfaces/batch/__init__.py b/pennylane/interfaces/batch/__init__.py index 2dd08bb188f..36cfdf51d1b 100644 --- a/pennylane/interfaces/batch/__init__.py +++ b/pennylane/interfaces/batch/__init__.py @@ -15,13 +15,137 @@ This subpackage defines functions for interfacing devices' batch execution capabilities with different machine learning libraries. """ -# pylint: disable=import-outside-toplevel,too-many-arguments +# pylint: disable=import-outside-toplevel,too-many-arguments,too-many-branches +from functools import wraps + +from cachetools import LRUCache +import numpy as np + import pennylane as qml from .autograd import execute as execute_autograd -def execute(tapes, device, gradient_fn, interface="autograd", mode="best", gradient_kwargs=None): +def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True): + """Decorator that adds caching to a function that executes + multiple tapes on a device. + + This decorator makes use of :attr:`.QuantumTape.hash` to identify + unique tapes. + + - If a tape does not match a hash in the cache, then the tape + has not been previously executed. It is executed, and the result + added to the cache. + + - If a tape matches a hash in the cache, then the tape has been previously + executed. The corresponding cached result is + extracted, and the tape is not passed to the execution function. + + - Finally, there might be the case where one or more tapes in the current + set of tapes to be executed are identical and thus share a hash. If this is the case, + duplicates are removed, to avoid redundant evaluations. + + Args: + fn (callable): The execution function to add caching to. + This function should have the signature ``fn(tapes, **kwargs)``, + and it should return ``list[tensor_like]``, with the + same length as the input ``tapes``. + cache (None or dict or Cache or bool): The cache to use. If ``None``, + caching will not occur. + pass_kwargs (bool): If ``True``, keyword arguments passed to the + wrapped function will be passed directly to ``fn``. If ``False``, + they will be ignored. + return_tuple (bool): If ``True``, the output of ``fn`` is returned + as a tuple ``(fn_ouput, [])``, to match the output of execution functions + that also return gradients. + + Returns: + function: a wrapped version of the execution function ``fn`` with caching + support + """ + + @wraps(fn) + def wrapper(tapes, **kwargs): + + if not pass_kwargs: + kwargs = {} + + if cache is None or (isinstance(cache, bool) and not cache): + # No caching. Simply execute the execution function + # and return the results. 
+ res = fn(tapes, **kwargs) + return res, [] if return_tuple else res + + execution_tapes = {} + cached_results = {} + hashes = {} + repeated = {} + + for i, tape in enumerate(tapes): + h = tape.hash + + if h in hashes.values(): + # Tape already exists within ``tapes``. Determine the + # index of the first occurrence of the tape, store this, + # and continue to the next iteration. + idx = list(hashes.keys())[list(hashes.values()).index(h)] + repeated[i] = idx + continue + + hashes[i] = h + + if hashes[i] in cache: + # Tape exists within the cache, store the cached result + cached_results[i] = cache[hashes[i]] + else: + # Tape does not exist within the cache, store the tape + # for execution via the execution function. + execution_tapes[i] = tape + + # if there are no execution tapes, simply return! + if not execution_tapes: + if not repeated: + res = list(cached_results.values()) + return res, [] if return_tuple else res + + else: + # execute all unique tapes that do not exist in the cache + res = fn(execution_tapes.values(), **kwargs) + + final_res = [] + + for i, tape in enumerate(tapes): + if i in cached_results: + # insert cached results into the results vector + final_res.append(cached_results[i]) + + elif i in repeated: + # insert repeated results into the results vector + final_res.append(final_res[repeated[i]]) + + else: + # insert evaluated results into the results vector + r = res.pop(0) + final_res.append(r) + cache[hashes[i]] = r + + return final_res, [] if return_tuple else final_res + + wrapper.fn = fn + return wrapper + + +def execute( + tapes, + device, + gradient_fn, + interface="autograd", + mode="best", + gradient_kwargs=None, + cache=True, + cachesize=10000, + max_diff=2, +): """Execute a batch of tapes on a device in an autodifferentiable-compatible manner. Args: @@ -42,6 +166,13 @@ def execute(tapes, device, gradient_fn, interface="autograd", mode="best", gradi pass. gradient_kwargs (dict): dictionary of keyword arguments to pass when determining the gradients of tapes + cache (bool): Whether to cache evaluations. This can result in + a significant reduction in quantum evaluations during gradient computations. + cachesize (int): the size of the cache + max_diff (int): If ``gradient_fn`` is a gradient transform, this option specifies + the maximum number of derivatives to support. Increasing this value allows + for higher order derivatives to be extracted, at the cost of additional + (classical) computational overhead during the backwards pass. Returns: list[list[float]]: A nested list of tape results. Each element in @@ -101,11 +232,15 @@ def cost_fn(params, x): [ 0.01983384, -0.97517033, 0. ], [ 0. , 0. , -0.95533649]]) """ - # Default execution function; simply call device.batch_execute - # and return no Jacobians. 
- execute_fn = lambda tapes, **kwargs: (device.batch_execute(tapes), []) gradient_kwargs = gradient_kwargs or {} + if isinstance(cache, bool) and cache: + # cache=True: create a LRUCache object + cache = LRUCache(maxsize=cachesize, getsizeof=len) + + # the default execution function is device.batch_execute + execute_fn = cache_execute(device.batch_execute, cache) + if gradient_fn == "device": # gradient function is a device method @@ -116,8 +251,13 @@ def cost_fn(params, x): gradient_fn = None elif mode == "backward": + # disable caching on the forward pass + execute_fn = cache_execute(device.batch_execute, cache=None) + # replace the backward gradient computation - gradient_fn = device.gradients + gradient_fn = cache_execute( + device.gradients, cache, pass_kwargs=True, return_tuple=False + ) elif mode == "forward": # In "forward" mode, gradients are automatically handled @@ -126,6 +266,10 @@ def cost_fn(params, x): raise ValueError("Gradient transforms cannot be used with mode='forward'") if interface == "autograd": - return execute_autograd(tapes, device, execute_fn, gradient_fn, gradient_kwargs) + res = execute_autograd( + tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff + ) + else: + raise ValueError(f"Unknown interface {interface}") - raise ValueError(f"Unknown interface {interface}") + return res diff --git a/pennylane/interfaces/batch/autograd.py b/pennylane/interfaces/batch/autograd.py index 252caafed1e..be71c9920f6 100644 --- a/pennylane/interfaces/batch/autograd.py +++ b/pennylane/interfaces/batch/autograd.py @@ -25,7 +25,7 @@ from pennylane import numpy as np -def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1): +def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=2): """Execute a batch of tapes with Autograd parameters on a device. Args: @@ -42,6 +42,10 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1): gradient_fn (callable): the gradient function to use to compute quantum gradients _n (int): a positive integer used to track nesting of derivatives, for example if the nth-order derivative is requested. + max_diff (int): If ``gradient_fn`` is a gradient transform, this option specifies + the maximum order of derivatives to support. Increasing this value allows + for higher order derivatives to be extracted, at the cost of additional + (classical) computational overhead during the backwards pass. Returns: list[list[float]]: A nested list of tape results. Each element in @@ -64,6 +68,7 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1): gradient_fn=gradient_fn, gradient_kwargs=gradient_kwargs, _n=_n, + max_diff=max_diff, )[0] @@ -76,6 +81,7 @@ def _execute( gradient_fn=None, gradient_kwargs=None, _n=1, + max_diff=2, ): # pylint: disable=dangerous-default-value,unused-argument """Autodifferentiable wrapper around ``Device.batch_execute``. @@ -119,6 +125,7 @@ def vjp( gradient_fn=None, gradient_kwargs=None, _n=1, + max_diff=2, ): # pylint: disable=dangerous-default-value,unused-argument """Returns the vector-Jacobian product operator for a batch of quantum tapes. @@ -139,6 +146,10 @@ def vjp( determining the gradients of tapes _n (int): a positive integer used to track nesting of derivatives, for example if the nth-order derivative is requested. + max_diff (int): If ``gradient_fn`` is a gradient transform, this option specifies + the maximum number of derivatives to support. 
Increasing this value allows + for higher order derivatives to be extracted, at the cost of additional + (classical) computational overhead during the backwards pass. Returns: function: this function accepts the backpropagation @@ -169,18 +180,43 @@ def grad_fn(dy): if "pennylane.gradients" in module_name: # Generate and execute the required gradient tapes - vjp_tapes, processing_fn = qml.gradients.batch_vjp( - tapes, dy, gradient_fn, reduction="append", gradient_kwargs=gradient_kwargs - ) - - # This is where the magic happens. Note that we call ``execute``. - # This recursion, coupled with the fact that the gradient transforms - # are differentiable, allows for arbitrary order differentiation. - vjps = processing_fn( - execute(vjp_tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=_n + 1) - ) - - elif inspect.ismethod(gradient_fn) and gradient_fn.__self__ is device: + if _n == max_diff: + with qml.tape.Unwrap(*tapes): + vjp_tapes, processing_fn = qml.gradients.batch_vjp( + tapes, + dy, + gradient_fn, + reduction="append", + gradient_kwargs=gradient_kwargs, + ) + + vjps = processing_fn(execute_fn(vjp_tapes)[0]) + + else: + vjp_tapes, processing_fn = qml.gradients.batch_vjp( + tapes, dy, gradient_fn, reduction="append", gradient_kwargs=gradient_kwargs + ) + + # This is where the magic happens. Note that we call ``execute``. + # This recursion, coupled with the fact that the gradient transforms + # are differentiable, allows for arbitrary order differentiation. + vjps = processing_fn( + execute( + vjp_tapes, + device, + execute_fn, + gradient_fn, + gradient_kwargs, + _n=_n + 1, + max_diff=max_diff, + ) + ) + + elif ( + hasattr(gradient_fn, "fn") + and inspect.ismethod(gradient_fn.fn) + and gradient_fn.fn.__self__ is device + ): # Gradient function is a device method. # Note that unlike the previous branch: # diff --git a/pennylane/measure.py b/pennylane/measure.py index fa30a2f7ae5..e48e7e9493f 100644 --- a/pennylane/measure.py +++ b/pennylane/measure.py @@ -202,6 +202,26 @@ def queue(self, context=qml.QueuingContext): return self + @property + def hash(self): + """int: returns an integer hash uniquely representing the measurement process""" + if self.obs is None: + fingerprint = ( + str(self.name), + tuple(self.wires.tolist()), + str(self.data), + self.return_type, + ) + else: + fingerprint = ( + str(self.obs.name), + tuple(self.wires.tolist()), + str(self.obs.data), + self.return_type, + ) + + return hash(fingerprint) + def expval(op): r"""Expectation value of the supplied observable. diff --git a/pennylane/operation.py b/pennylane/operation.py index 8b3509845f4..3679420a7b4 100644 --- a/pennylane/operation.py +++ b/pennylane/operation.py @@ -233,6 +233,16 @@ def classproperty(func): # ============================================================================= +def _process_data(op): + if op.name in ("RX", "RY", "RZ", "PhaseShift", "Rot"): + return str([d % (2 * np.pi) for d in op.data]) + + if op.name in ("CRX", "CRY", "CRZ", "CRot"): + return str([d % (4 * np.pi) for d in op.data]) + + return str(op.data) + + class Operator(abc.ABC): r"""Base class for quantum operators supported by a device. 
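The `_process_data` helper above reduces rotation angles modulo their period (2π for `RX`, `RY`, `RZ`, `PhaseShift` and `Rot`, 4π for the controlled rotations) before they enter the operator fingerprint, so that physically equivalent parameters hash identically. A minimal illustrative sketch of the intended behaviour, assuming the `Operator.hash` property added in the next hunk (editorial aside, not part of the patch):

```python
import numpy as np
import pennylane as qml

# Single-qubit rotations have period 2*pi, so angles differing by 2*pi hash the same.
op1 = qml.RX(np.pi / 2, wires=0)
op2 = qml.RX(np.pi / 2 - 2 * np.pi, wires=0)
assert op1.hash == op2.hash

# Controlled rotations have period 4*pi instead.
crx1 = qml.CRX(np.pi / 2, wires=[0, 1])
crx2 = qml.CRX(np.pi / 2 - 4 * np.pi, wires=[0, 1])
assert crx1.hash == crx2.hash

# Wires are part of the fingerprint, so the same gate on different wires hashes differently.
assert qml.RX(np.pi / 2, wires=0).hash != qml.RX(np.pi / 2, wires=1).hash
```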
@@ -282,6 +292,11 @@ def __deepcopy__(self, memo): setattr(copied_op, attribute, copy.deepcopy(value, memo)) return copied_op + @property + def hash(self): + """int: returns an integer hash uniquely representing the operator""" + return hash((str(self.name), tuple(self.wires.tolist()), _process_data(self))) + @classmethod def _matrix(cls, *params): """Matrix representation of the operator diff --git a/pennylane/tape/tape.py b/pennylane/tape/tape.py index 6004c117d4e..dd5cae7647c 100644 --- a/pennylane/tape/tape.py +++ b/pennylane/tape/tape.py @@ -1243,6 +1243,15 @@ def copy(self, copy_operations=False, tape_cls=None): def __copy__(self): return self.copy(copy_operations=True) + @property + def hash(self): + """int: returns an integer hash uniquely representing the quantum tape""" + fingerprint = [] + fingerprint.extend(op.hash for op in self.operations) + fingerprint.extend(m.hash for m in self.measurements) + fingerprint.extend(self.trainable_params) + return hash(tuple(fingerprint)) + # ======================================================== # execution methods # ======================================================== diff --git a/requirements.txt b/requirements.txt index 79bb69b1e46..25a996ac7cb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ numpy==1.21 scipy==1.7 cvxpy==1.1 cvxopt==1.2 +cachetools==4.2.2 networkx==2.6 tensornetwork==0.3 autograd==1.3 diff --git a/setup.py b/setup.py index 5531f525d82..6d99a623329 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,8 @@ "toml", "appdirs", "semantic_version==2.6", - "autoray" + "autoray", + "cachetools" ] info = { diff --git a/tests/interfaces/test_batch_autograd.py b/tests/interfaces/test_batch_autograd.py index 435c5004c3c..e9173381100 100644 --- a/tests/interfaces/test_batch_autograd.py +++ b/tests/interfaces/test_batch_autograd.py @@ -159,6 +159,181 @@ def cost(a): spy_gradients.assert_called() +class TestCaching: + """Test for caching behaviour""" + + def test_cache_maxsize(self, mocker): + """Test the cachesize property of the cache""" + dev = qml.device("default.qubit", wires=1) + spy = mocker.spy(qml.interfaces.batch, "cache_execute") + + def cost(a, cachesize): + with qml.tape.JacobianTape() as tape: + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + qml.probs(wires=0) + + return execute([tape], dev, gradient_fn=param_shift, cachesize=cachesize)[0] + + params = np.array([0.1, 0.2]) + qml.jacobian(cost)(params, cachesize=2) + cache = spy.call_args[0][1] + + assert cache.maxsize == 2 + assert cache.currsize == 2 + assert len(cache) == 2 + + def test_custom_cache(self, mocker): + """Test the use of a custom cache object""" + dev = qml.device("default.qubit", wires=1) + spy = mocker.spy(qml.interfaces.batch, "cache_execute") + + def cost(a, cache): + with qml.tape.JacobianTape() as tape: + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + qml.probs(wires=0) + + return execute([tape], dev, gradient_fn=param_shift, cache=cache)[0] + + custom_cache = {} + params = np.array([0.1, 0.2]) + qml.jacobian(cost)(params, cache=custom_cache) + + cache = spy.call_args[0][1] + assert cache is custom_cache + + def test_caching_param_shift(self, tol): + """Test that, when using parameter-shift transform, + caching reduces the number of evaluations to their optimum.""" + dev = qml.device("default.qubit", wires=1) + + def cost(a, cache): + with qml.tape.JacobianTape() as tape: + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + qml.probs(wires=0) + + return execute([tape], dev, gradient_fn=param_shift, cache=cache)[0] + + # Without 
caching, 9 evaluations are required to compute + # the Jacobian: 1 (forward pass) + 2 (backward pass) * (2 shifts * 2 params) + params = np.array([0.1, 0.2]) + qml.jacobian(cost)(params, cache=None) + assert dev.num_executions == 9 + + # With caching, 5 evaluations are required to compute + # the Jacobian: 1 (forward pass) + (2 shifts * 2 params) + dev._num_executions = 0 + jac_fn = qml.jacobian(cost) + grad1 = jac_fn(params, cache=True) + assert dev.num_executions == 5 + + # Check that calling the cost function again + # continues to evaluate the device (that is, the cache + # is emptied between calls) + grad2 = jac_fn(params, cache=True) + assert dev.num_executions == 10 + assert np.allclose(grad1, grad2, atol=tol, rtol=0) + + # Check that calling the cost function again + # with different parameters produces a different Jacobian + grad2 = jac_fn(2 * params, cache=True) + assert dev.num_executions == 15 + assert not np.allclose(grad1, grad2, atol=tol, rtol=0) + + @pytest.mark.parametrize("num_params", [2, 3]) + def test_caching_param_shift_hessian(self, num_params, tol): + """Test that, when using parameter-shift transform, + caching reduces the number of evaluations to their optimum + when computing Hessians.""" + dev = qml.device("default.qubit", wires=2) + params = np.arange(1, num_params + 1) / 10 + + N = len(params) + + def cost(x, cache): + with qml.tape.JacobianTape() as tape: + qml.RX(x[0], wires=[0]) + qml.RY(x[1], wires=[1]) + + for i in range(2, num_params): + qml.RZ(x[i], wires=[i % 2]) + + qml.CNOT(wires=[0, 1]) + qml.var(qml.PauliZ(0) @ qml.PauliX(1)) + + return execute([tape], dev, gradient_fn=param_shift, cache=cache)[0] + + # No caching: number of executions is not ideal + hess1 = qml.jacobian(qml.grad(cost))(params, cache=False) + + if num_params == 2: + # compare to theoretical result + x, y, *_ = params + expected = np.array( + [ + [2 * np.cos(2 * x) * np.sin(y) ** 2, np.sin(2 * x) * np.sin(2 * y)], + [np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x) ** 2 * np.cos(2 * y)], + ] + ) + assert np.allclose(expected, hess1, atol=tol, rtol=0) + + expected_runs = 1 # forward pass + expected_runs += 2 * N # Jacobian + expected_runs += 4 * N + 1 # Hessian diagonal + expected_runs += 4 * N ** 2 # Hessian off-diagonal + assert dev.num_executions == expected_runs + + # Use caching: number of executions is ideal + dev._num_executions = 0 + hess2 = qml.jacobian(qml.grad(cost))(params, cache=True) + assert np.allclose(hess1, hess2, atol=tol, rtol=0) + + expected_runs_ideal = 1 # forward pass + expected_runs_ideal += 2 * N # Jacobian + expected_runs_ideal += 2 * N + 1 # Hessian diagonal + expected_runs_ideal += 4 * N * (N - 1) // 2 # Hessian off-diagonal + assert dev.num_executions == expected_runs_ideal + assert expected_runs_ideal < expected_runs + + def test_caching_adjoint_backward(self): + """Test that caching reduces the number of adjoint evaluations + when mode=backward""" + dev = qml.device("default.qubit", wires=2) + params = np.array([0.1, 0.2, 0.3]) + + def cost(a, cache): + with qml.tape.JacobianTape() as tape: + qml.RY(a[0], wires=0) + qml.RX(a[1], wires=0) + qml.RY(a[2], wires=0) + qml.expval(qml.PauliZ(0)) + qml.expval(qml.PauliZ(1)) + + return execute( + [tape], + dev, + gradient_fn="device", + cache=cache, + mode="backward", + gradient_kwargs={"method": "adjoint_jacobian"}, + )[0] + + # Without caching, 3 evaluations are required. + # 1 for the forward pass, and one per output dimension + # on the backward pass. 
+ qml.jacobian(cost)(params, cache=None) + assert dev.num_executions == 3 + + # With caching, only 2 evaluations are required. One + # for the forward pass, and one for the backward pass. + dev._num_executions = 0 + jac_fn = qml.jacobian(cost) + grad1 = jac_fn(params, cache=True) + assert dev.num_executions == 2 + + execute_kwargs = [ {"gradient_fn": param_shift}, { @@ -594,3 +769,42 @@ def cost_fn(x): res = qml.jacobian(qml.grad(cost_fn))(params) assert np.allclose(res, np.zeros([2, 2]), atol=tol, rtol=0) + + def test_max_diff(self, tol): + """Test that setting the max_diff parameter blocks higher-order + derivatives""" + dev = qml.device("default.qubit.autograd", wires=2) + params = np.array([0.543, -0.654], requires_grad=True) + + def cost_fn(x): + with qml.tape.JacobianTape() as tape1: + qml.RX(x[0], wires=[0]) + qml.RY(x[1], wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.var(qml.PauliZ(0) @ qml.PauliX(1)) + + with qml.tape.JacobianTape() as tape2: + qml.RX(x[0], wires=0) + qml.RY(x[0], wires=1) + qml.CNOT(wires=[0, 1]) + qml.probs(wires=1) + + result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=1) + return result[0] + result[1][0, 0] + + res = cost_fn(params) + x, y = params + expected = 0.5 * (3 + np.cos(x) ** 2 * np.cos(2 * y)) + assert np.allclose(res, expected, atol=tol, rtol=0) + + res = qml.grad(cost_fn)(params) + expected = np.array( + [-np.cos(x) * np.cos(2 * y) * np.sin(x), -np.cos(x) ** 2 * np.sin(2 * y)] + ) + assert np.allclose(res, expected, atol=tol, rtol=0) + + with pytest.warns(UserWarning, match="Output seems independent"): + res = qml.jacobian(qml.grad(cost_fn))(params) + + expected = np.zeros([2, 2]) + assert np.allclose(res, expected, atol=tol, rtol=0) diff --git a/tests/tape/test_tape.py b/tests/tape/test_tape.py index c9d81de51f8..2bfe534fdf8 100644 --- a/tests/tape/test_tape.py +++ b/tests/tape/test_tape.py @@ -1466,3 +1466,225 @@ def test_gate_tape(): assert qml.tape.get_active_tape() is tape1 assert qml.tape.get_active_tape() is None + + +class TestHashing: + """Test for tape hashing""" + + @pytest.mark.parametrize( + "m", + [ + qml.expval(qml.PauliZ(0)), + qml.state(), + qml.probs(wires=0), + qml.density_matrix(wires=0), + qml.var(qml.PauliY(0)), + ], + ) + def test_identical(self, m): + """Tests that the circuit hash of identical circuits are identical""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.apply(m) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.apply(m) + + assert tape1.hash == tape2.hash + + def test_identical_numeric(self): + """Tests that the circuit hash of identical circuits are identical + even though the datatype of the arguments may differ""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(np.array(a), wires=[0]) + qml.RY(np.array(b), wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + assert tape1.hash == tape2.hash + + def test_different_wires(self): + """Tests that the circuit hash of circuits with the same operations + on different wires have different hashes""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[1]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + with 
qml.tape.QuantumTape() as tape2: + qml.RX(np.array(a), wires=[0]) + qml.RY(np.array(b), wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + assert tape1.hash != tape2.hash + + def test_different_trainabilities(self): + """Tests that the circuit hash of identical circuits differ + if the circuits have different trainable parameters""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0) @ qml.PauliX(1)) + + tape1.trainable_params = {0} + tape2.trainable_params = {0, 1} + assert tape1.hash != tape2.hash + + def test_different_parameters(self): + """Tests that the circuit hash of circuits with different + parameters differs""" + a = 0.3 + b = 0.2 + c = 0.6 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(c, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + assert tape1.hash != tape2.hash + + def test_different_operations(self): + """Tests that the circuit hash of circuits with different + operations differs""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RZ(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + assert tape1.hash != tape2.hash + + def test_different_measurements(self): + """Tests that the circuit hash of circuits with different + measurements differs""" + a = 0.3 + b = 0.2 + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.var(qml.PauliZ(0)) + + assert tape1.hash != tape2.hash + + def test_different_observables(self): + """Tests that the circuit hash of circuits with different + observables differs""" + a = 0.3 + b = 0.2 + + A = np.diag([1.0, 2.0]) + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.PauliZ(0)) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.Hermitian(A, wires=0)) + + assert tape1.hash != tape2.hash + + def test_rotation_modulo_identical(self): + """Tests that the circuit hash of circuits with single-qubit + rotations differing by multiples of 2pi have identical hash""" + a = np.array(np.pi / 2, dtype=np.float64) + b = np.array(np.pi / 4, dtype=np.float64) + + H = qml.Hamiltonian([0.1, 0.2], [qml.PauliX(0), qml.PauliZ(0) @ qml.PauliY(1)]) + + with qml.tape.QuantumTape() as tape1: + qml.RX(a, wires=[0]) + qml.RY(b, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(H) + + with qml.tape.QuantumTape() as tape2: + qml.RX(a - 2 * np.pi, wires=[0]) + qml.RY(b + 2 * np.pi, wires=[1]) + qml.CNOT(wires=[0, 1]) + qml.expval(H) + + assert tape1.hash == tape2.hash + + def test_controlled_rotation_modulo_identical(self): + """Tests that the circuit hash of circuits with controlled + rotations differing by multiples of 2pi have identical hash""" 
+ a = np.array(np.pi / 2, dtype=np.float64) + b = np.array(np.pi / 2, dtype=np.float64) + + H = qml.Hamiltonian([0.1, 0.2], [qml.PauliX(0), qml.PauliZ(0) @ qml.PauliY(1)]) + + with qml.tape.QuantumTape() as tape1: + qml.CRX(a, wires=[0, 1]) + qml.CRY(b, wires=[0, 1]) + qml.CNOT(wires=[0, 1]) + qml.expval(H) + + with qml.tape.QuantumTape() as tape2: + qml.CRX(a - 4 * np.pi, wires=[0, 1]) + qml.CRY(b + 4 * np.pi, wires=[0, 1]) + qml.CNOT(wires=[0, 1]) + qml.expval(H) + + assert tape1.hash == tape2.hash
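Taken together, the changes above are intended to compose as follows: `execute` wraps `device.batch_execute` in `cache_execute`, which keys an `LRUCache` on `tape.hash`, so duplicate tapes generated during a parameter-shift backward pass are evaluated on the device only once. A minimal usage sketch mirroring the new tests (illustrative device, angles and cost function; not part of the patch):

```python
import pennylane as qml
from pennylane import numpy as np
from pennylane.interfaces.batch import execute

dev = qml.device("default.qubit", wires=1)

def cost(a):
    with qml.tape.JacobianTape() as tape:
        qml.RY(a[0], wires=0)
        qml.RX(a[1], wires=0)
        qml.probs(wires=0)

    # cache=True (the default) stores device results keyed on tape.hash, so
    # repeated or duplicate tapes in the forward and backward passes hit the cache.
    return execute(
        [tape], dev, gradient_fn=qml.gradients.param_shift, cache=True
    )[0]

params = np.array([0.1, 0.2], requires_grad=True)
jac = qml.jacobian(cost)(params)

# With caching enabled, the Jacobian above should need fewer device evaluations
# than the uncached case (5 vs 9 in the corresponding test).
print(dev.num_executions)
```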