Add grid parallel two-qubit XEB (#2834)
This separates data collection and data analysis using idioms suggested by @mpharrigan (though note, Matt, that my definitions of `save` and `load` differ from the ones we've been using internally).
kevinsung committed Jul 9, 2020
1 parent 62ef7f7 commit 1a6c6f6
Showing 14 changed files with 1,131 additions and 6 deletions.
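The commit exports two new entry points from `cirq.experiments`: `collect_grid_parallel_two_qubit_xeb_data` (runs the circuits and saves raw data) and `compute_grid_parallel_two_qubit_xeb_results` (loads the saved data and estimates fidelities). Below is a minimal sketch of that two-phase workflow; the keyword names `sampler`, `qubits`, and `two_qubit_gate`, and the returned data-collection id, are assumptions inferred from the commit, not a verified signature (see cirq/experiments/grid_parallel_two_qubit_xeb.py for the actual API).

import cirq

# Phase 1: data collection. The call below is a hypothetical sketch: it runs
# parallel two-qubit XEB circuits on the sampler, writes the raw results to
# disk, and returns an identifier for the collected data set.
data_collection_id = cirq.experiments.collect_grid_parallel_two_qubit_xeb_data(
    sampler=cirq.Simulator(),
    qubits=cirq.GridQubit.square(2),
    two_qubit_gate=cirq.ISWAP**0.5)

# Phase 2: data analysis. Loads the saved data and returns a
# CrossEntropyResultDict keyed by qubit pair.
results = cirq.experiments.compute_grid_parallel_two_qubit_xeb_results(
    data_collection_id)

for qubit_pair, result in results.items():
    print(qubit_pair, result)

Because the first phase persists its results, collection and analysis can run in separate sessions, which is the point of the `save`/`load` idioms mentioned in the commit message.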
2 changes: 2 additions & 0 deletions cirq/__init__.py
@@ -91,6 +91,8 @@
from cirq.experiments import (
estimate_single_qubit_readout_errors,
hog_score_xeb_fidelity_from_probabilities,
least_squares_xeb_fidelity_from_expectations,
least_squares_xeb_fidelity_from_probabilities,
linear_xeb_fidelity,
linear_xeb_fidelity_from_probabilities,
log_xeb_fidelity,
8 changes: 8 additions & 0 deletions cirq/experiments/__init__.py
@@ -19,17 +19,25 @@
build_entangling_layers,
cross_entropy_benchmarking,
CrossEntropyResult,
CrossEntropyResultDict,
)

from cirq.experiments.fidelity_estimation import (
hog_score_xeb_fidelity_from_probabilities,
least_squares_xeb_fidelity_from_expectations,
least_squares_xeb_fidelity_from_probabilities,
linear_xeb_fidelity,
linear_xeb_fidelity_from_probabilities,
log_xeb_fidelity,
log_xeb_fidelity_from_probabilities,
xeb_fidelity,
)

from cirq.experiments.grid_parallel_two_qubit_xeb import (
collect_grid_parallel_two_qubit_xeb_data,
compute_grid_parallel_two_qubit_xeb_results,
)

from cirq.experiments.random_quantum_circuit_generation import (
GRID_ALIGNED_PATTERN,
GRID_STAGGERED_PATTERN,
45 changes: 43 additions & 2 deletions cirq/experiments/cross_entropy_benchmarking.py
@@ -12,14 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import (Any, Dict, Iterable, List, NamedTuple, Optional, Sequence,
Set, Tuple, Union)
from typing import (Any, Dict, Iterable, List, Mapping, NamedTuple, Optional,
Sequence, Set, TYPE_CHECKING, Tuple, Union)
import dataclasses
import numpy as np
import scipy
from matplotlib import pyplot as plt
from cirq import circuits, devices, ops, protocols, sim, work

if TYPE_CHECKING:
import cirq

CrossEntropyPair = NamedTuple('CrossEntropyPair', [('num_cycle', int),
('xeb_fidelity', float)])

@@ -144,6 +147,44 @@ def __repr__(self) -> str:
f'repetitions={self.repetitions!r})')


@dataclasses.dataclass
class CrossEntropyResultDict(Mapping[Tuple['cirq.Qid', ...], CrossEntropyResult]
):
"""Per-qubit-tuple results from cross-entropy benchmarking.
Attributes:
results: Dictionary from qubit tuple to cross-entropy benchmarking
result for that tuple.
"""
results: Dict[Tuple['cirq.Qid', ...], CrossEntropyResult]

def _json_dict_(self) -> Dict[str, Any]:
return {
'cirq_type': self.__class__.__name__,
'results': list(self.results.items()),
}

@classmethod
def _from_json_dict_(
cls, results: List[Tuple[List['cirq.Qid'], CrossEntropyResult]],
**kwargs) -> 'CrossEntropyResultDict':
return cls(
results={tuple(qubits): result for qubits, result in results})

def __repr__(self) -> str:
return ('cirq.experiments.CrossEntropyResultDict('
f'results={self.results!r})')

def __getitem__(self, key: Tuple['cirq.Qid', ...]) -> CrossEntropyResult:
return self.results[key]

def __iter__(self):
return iter(self.results)

def __len__(self):
return len(self.results)


def cross_entropy_benchmarking(
sampler: work.Sampler,
qubits: Sequence[ops.Qid],
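As an illustration (not part of the diff), the snippet below shows how the new `CrossEntropyResultDict` behaves as a `Mapping`, using only the constructor and methods defined above; the qubits and fidelity values are made up.

import cirq
from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict
from cirq.experiments.cross_entropy_benchmarking import CrossEntropyPair

pair = tuple(cirq.LineQubit.range(2))
result = CrossEntropyResult(
    data=[CrossEntropyPair(2, 0.9), CrossEntropyPair(5, 0.5)],
    repetitions=1000)
result_dict = CrossEntropyResultDict(results={pair: result})

# __getitem__, __iter__, and __len__ all delegate to the underlying dict,
# so the standard Mapping interface works as expected.
assert result_dict[pair] is result
assert len(result_dict) == 1
assert list(result_dict) == [pair]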
13 changes: 12 additions & 1 deletion cirq/experiments/cross_entropy_benchmarking_test.py
@@ -17,7 +17,8 @@
import matplotlib.pyplot as plt
import cirq

from cirq.experiments import (CrossEntropyResult, cross_entropy_benchmarking,
from cirq.experiments import (CrossEntropyResult, CrossEntropyResultDict,
cross_entropy_benchmarking,
build_entangling_layers)
from cirq.experiments.cross_entropy_benchmarking import CrossEntropyPair

@@ -106,3 +107,13 @@ def test_cross_entropy_result_repr():
CrossEntropyPair(5, 0.5)],
repetitions=1000)
cirq.testing.assert_equivalent_repr(result)


def test_cross_entropy_result_dict_repr():
pair = tuple(cirq.LineQubit.range(2))
result = CrossEntropyResult(
data=[CrossEntropyPair(2, 0.9),
CrossEntropyPair(5, 0.5)],
repetitions=1000)
result_dict = CrossEntropyResultDict(results={pair: result})
cirq.testing.assert_equivalent_repr(result_dict)
152 changes: 151 additions & 1 deletion cirq/experiments/fidelity_estimation.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""Estimation of fidelity associated with experimental circuit executions."""

from typing import Callable, Mapping, Optional, Sequence
from typing import Callable, List, Mapping, Optional, Sequence, Tuple, cast

import numpy as np

@@ -227,3 +227,153 @@ def log_xeb_fidelity(
qubit_order,
amplitudes,
estimator=log_xeb_fidelity_from_probabilities)


def least_squares_xeb_fidelity_from_expectations(
measured_expectations: Sequence[float],
exact_expectations: Sequence[float],
uniform_expectations: Sequence[float]) -> Tuple[float, List[float]]:
"""Least squares fidelity estimator.
An XEB experiment collects data from the execution of random circuits
subject to noise. The effect of applying a random circuit with unitary U is
modeled as U followed by a depolarizing channel. The result is that the
initial state |𝜓⟩ is mapped to a density matrix ρ_U as follows:
|𝜓⟩ → ρ_U = f |𝜓_U⟩⟨𝜓_U| + (1 - f) I / D
where |𝜓_U⟩ = U|𝜓⟩, D is the dimension of the Hilbert space, I / D is the
maximally mixed state, and f is the fidelity with which the circuit is
applied. Let O_U be an observable that is diagonal in the computational
basis. Then the expectation of O_U on ρ_U is given by
Tr(ρ_U O_U) = f ⟨𝜓_U|O_U|𝜓_U⟩ + (1 - f) Tr(O_U / D).
This equation shows how f can be estimated, since Tr(ρ_U O_U) can be
estimated from experimental data, and ⟨𝜓_U|O_U|𝜓_U⟩ and Tr(O_U / D) can be
computed numerically.
Let e_U = ⟨𝜓_U|O_U|𝜓_U⟩, u_U = Tr(O_U / D), and m_U denote the experimental
estimate of Tr(ρ_U O_U). Then we estimate f by performing least squares
minimization of the quantity
f (e_U - u_U) - (m_U - u_U)
over different random circuits (giving different U). The solution to the
least squares problem is given by
f = (∑_U (m_U - u_U) * (e_U - u_U)) / (∑_U (e_U - u_U)^2).
Args:
measured_expectations: A sequence of the m_U, the experimental estimates
of the observable, one for each circuit U.
exact_expectations: A sequence of the e_U, the exact value of the
observable. The order should match the order of the
`measured_expectations` argument.
uniform_expectations: A sequence of the u_U, the expectation of the
observable on a uniformly random bitstring. The order should match
the order in the other arguments.
Returns:
A tuple of two values. The first value is the estimated fidelity.
The second value is a list of the residuals
f (e_U - u_U) - (m_U - u_U)
of the least squares minimization.
Raises:
ValueError: The lengths of the input sequences are not all the same.
"""
if not (len(measured_expectations) == len(exact_expectations) ==
len(uniform_expectations)):
raise ValueError('The lengths of measured_expectations, '
'exact_expectations, and uniform_expectations must '
'all be the same. Got lengths '
f'{len(measured_expectations)}, '
f'{len(exact_expectations)}, and '
f'{len(uniform_expectations)}.')
numerator = 0.0
denominator = 0.0
for m, e, u in zip(measured_expectations, exact_expectations,
uniform_expectations):
numerator += (m - u) * (e - u)
denominator += (e - u)**2
fidelity = numerator / denominator
residuals = [
fidelity * (e - u) - (m - u) for m, e, u in zip(
measured_expectations, exact_expectations, uniform_expectations)
]
return fidelity, residuals


def least_squares_xeb_fidelity_from_probabilities(
hilbert_space_dimension: int,
observed_probabilities: Sequence[Sequence[float]],
all_probabilities: Sequence[Sequence[float]],
observable_from_probability: Optional[Callable[[float], float]] = None,
normalize_probabilities: bool = True) -> Tuple[float, List[float]]:
"""Least squares fidelity estimator with observable based on probabilities.
Using the notation from the docstring of
`least_squares_xeb_fidelity_from_expectations`, this function computes the
least squares fidelity estimate when the observable O_U has eigenvalue
corresponding to the computational basis state |z⟩ given by g(p(z)), where
p(z) = |⟨z|𝜓_U⟩|^2 and g is a function that can be specified. By default,
g is the identity function, but other choices, such as the logarithm, are
useful. By default, the probability p(z) is actually multiplied by the
Hilbert space dimension D, so that the observable is actually g(D * p(z)).
This behavior can be disabled by setting `normalize_probabilities` to
False.
Args:
hilbert_space_dimension: Dimension of the Hilbert space on which
the channel whose fidelity is being estimated is defined.
observed_probabilities: Ideal probabilities of bitstrings observed in
experiments. A list of lists, where each inner list contains the
probabilities for a single circuit.
all_probabilities: Ideal probabilities of all possible bitstrings.
A list of lists, where each inner list contains the probabilities
for a single circuit, and should have length equal to the Hilbert
space dimension. The order of the lists should correspond to that
of `observed_probabilities`.
observable_from_probability: Function that computes the observable from
a given probability.
normalize_probabilities: Whether to multiply the probabilities by the
Hilbert space dimension before computing the observable.
Returns:
A tuple of two values. The first value is the estimated fidelity.
The second value is a list of the residuals
f (e_U - u_U) - (m_U - u_U)
of the least squares minimization.
"""
if not isinstance(observable_from_probability, np.ufunc):
if observable_from_probability is None:
observable_from_probability = lambda p: p
else:
observable_from_probability = np.frompyfunc(
observable_from_probability, 1, 1)
observable_from_probability = cast(Callable, observable_from_probability)
measured_expectations = []
exact_expectations = []
uniform_expectations = []
prefactor = hilbert_space_dimension if normalize_probabilities else 1.0
for observed_probs, all_probs in zip(observed_probabilities,
all_probabilities):
observed_probs = np.array(observed_probs)
all_probs = np.array(all_probs)
observable = observable_from_probability(prefactor *
cast(np.ndarray, all_probs))
measured_expectations.append(
np.mean(
observable_from_probability(prefactor *
cast(np.ndarray, observed_probs))))
exact_expectations.append(np.sum(all_probs * observable))
uniform_expectations.append(
np.sum(observable) / hilbert_space_dimension)
return least_squares_xeb_fidelity_from_expectations(measured_expectations,
exact_expectations,
uniform_expectations)
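To make the estimators concrete, here is a short self-contained check (not part of the commit) against the formula in the docstring above: when the synthetic measured expectations satisfy m_U - u_U = f (e_U - u_U) exactly, the least-squares fit recovers f and the residuals are zero. The call to the probability-based wrapper uses made-up distributions purely to show its argument layout.

import numpy as np

from cirq.experiments import (
    least_squares_xeb_fidelity_from_expectations,
    least_squares_xeb_fidelity_from_probabilities,
)

# Synthetic per-circuit data with a known fidelity f = 0.75.
f_true = 0.75
exact = [1.8, 2.1, 1.6]    # e_U
uniform = [1.0, 1.0, 1.0]  # u_U
measured = [u + f_true * (e - u) for e, u in zip(exact, uniform)]  # m_U

fidelity, residuals = least_squares_xeb_fidelity_from_expectations(
    measured, exact, uniform)
assert np.isclose(fidelity, f_true)
assert np.allclose(residuals, 0.0)

# The probability-based variant derives e_U, u_U, and m_U from ideal
# bitstring probabilities; with the default identity observable, the
# observable value for bitstring z is D * p(z). Numbers are illustrative.
fidelity2, _ = least_squares_xeb_fidelity_from_probabilities(
    hilbert_space_dimension=4,
    observed_probabilities=[[0.4, 0.4, 0.2], [0.3, 0.25, 0.25]],
    all_probabilities=[[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.25, 0.15]])
print(fidelity, fidelity2)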
