Calibration to fidelity (#4431)
Ignore the fidelity.py and fidelity_test.py files; those are already under review. All feedback is appreciated!
asmuzsoy committed Aug 19, 2021
1 parent aa8184c commit e6c584b
Showing 2 changed files with 398 additions and 0 deletions.
cirq_google/experimental/noise_models/calibration_to_noise_properties.py
@@ -0,0 +1,115 @@
import cirq_google
import numpy as np
from cirq.devices.noise_properties import NoiseProperties


def _xeb_fidelity_to_decay_constant(xeb_fidelity, num_qubits=2):
    # Converts from XEB Fidelity to depolarization decay constant
    if xeb_fidelity is not None:
        N = 2 ** num_qubits  # Dimension of Hilbert space
        return 1 - (1 - xeb_fidelity) / (1 - 1 / N)
    return None


def _rb_average_error_to_decay_constant(rb_average_error, num_qubits: int = 1):
    # Converts from randomized benchmarking average error to depolarization decay constant
    if rb_average_error is not None:
        N = 2 ** num_qubits  # Dimension of Hilbert space
        return 1 - rb_average_error / (1 - 1 / N)
    else:
        return None


def _rb_pauli_error_to_decay_constant(rb_pauli_error, num_qubits: int = 1):
    # Converts from randomized benchmarking pauli error to depolarization decay constant
    if rb_pauli_error is not None:
        N = 2 ** num_qubits  # Dimension of Hilbert space
        return 1 - rb_pauli_error / (1 - 1 / N ** 2)
    else:
        return None


def _within_tolerance(val_1, val_2, tolerance):
    # Helper function to check if two values are within tolerance
    if val_1 is None or val_2 is None:
        return True
    return abs(val_1 - val_2) <= tolerance


def _unpack_from_calibration(metric_name, calibration):
    # Gets the average (over all qubits) of each metric
    # TODO: Add support for per-qubit noise
    if metric_name in calibration.keys():
        return np.mean([value for qubit, value in calibration[metric_name].items()])
    else:
        return None


def noise_properties_from_calibration(
    calibration: cirq_google.Calibration, validate: bool = True, tolerance: float = 0.01
):
    """Translates a cirq_google.Calibration object into a NoiseProperties object.

    The NoiseProperties object can then be used as input to the NoiseModelFromNoiseProperties
    class (cirq.devices.noise_properties) to create a NoiseModel that can be used with a
    simulator.

    The depolarization decay constant is calculated from the RB Pauli error if defined, from the
    XEB Fidelity if RB Pauli error is not defined, or from the RB Average error if neither of the
    others is defined. If validate is True (the default), the decay constants derived from these
    metrics must agree to within the given tolerance.

    Args:
        calibration: a Calibration object with hardware metrics.
        validate: whether or not to check that the depolarization decay constants calculated from
            RB Pauli error, RB Average error, and XEB Fidelity agree to within the given tolerance.
        tolerance: threshold for validating the decay constants from RB Pauli error, RB Average
            error, and XEB Fidelity against each other.

    Raises:
        ValueError: decay constants from RB Pauli Error and RB Average Error aren't within
            tolerance.
        ValueError: decay constants from RB Pauli Error and XEB Fidelity aren't within tolerance.
        ValueError: decay constants from RB Average Error and XEB Fidelity aren't within tolerance.
    """

    # Unpack all values from Calibration object
    t1_micros = _unpack_from_calibration('single_qubit_idle_t1_micros', calibration)
    t1_nanos = t1_micros * 1000 if t1_micros is not None else None
    xeb_fidelity = _unpack_from_calibration('xeb', calibration)
    rb_pauli_error = _unpack_from_calibration('single_qubit_rb_pauli_error_per_gate', calibration)
    rb_average_error = _unpack_from_calibration(
        'single_qubit_rb_average_error_per_gate', calibration
    )
    p00 = _unpack_from_calibration('single_qubit_p00_error', calibration)
    p11 = _unpack_from_calibration('single_qubit_p11_error', calibration)
    decay_constant_pauli = _rb_pauli_error_to_decay_constant(rb_pauli_error)

    decay_constant_average = _rb_average_error_to_decay_constant(rb_average_error)

    if validate:  # Will throw error if metrics aren't compatible
        if not _within_tolerance(decay_constant_pauli, decay_constant_average, tolerance):
            raise ValueError(
                f'Decay constant from RB Pauli error: {decay_constant_pauli}, '
                f'decay constant from RB Average error: {decay_constant_average}. '
                'If validation is disabled, RB Pauli error will be used.'
            )
        decay_constant_from_xeb = _xeb_fidelity_to_decay_constant(xeb_fidelity)
        if not _within_tolerance(decay_constant_from_xeb, decay_constant_pauli, tolerance):
            raise ValueError(
                f'Decay constant from RB Pauli error: {decay_constant_pauli}, '
                f'decay constant from XEB Fidelity: {decay_constant_from_xeb}. '
                'If validation is disabled, RB Pauli error will be used.'
            )
        if not _within_tolerance(decay_constant_from_xeb, decay_constant_average, tolerance):
            raise ValueError(
                f'Decay constant from RB Average error: {decay_constant_average}, '
                f'decay constant from XEB Fidelity: {decay_constant_from_xeb}. '
                'If validation is disabled, XEB Fidelity will be used.'
            )

    if decay_constant_pauli is not None:  # can't define both decay constant and xeb
        return NoiseProperties(
            t1_ns=t1_nanos, decay_constant=decay_constant_pauli, p00=p00, p11=p11
        )
    if xeb_fidelity is not None:
        return NoiseProperties(t1_ns=t1_nanos, xeb_fidelity=xeb_fidelity, p00=p00, p11=p11)
    return NoiseProperties(t1_ns=t1_nanos, decay_constant=decay_constant_average, p00=p00, p11=p11)
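
For readers trying out the new conversion, here is a minimal usage sketch (not part of this commit). It assumes the NoiseModelFromNoiseProperties import path given in the docstring above and the module path used by the test file below; the metric value is illustrative, and cirq.DensityMatrixSimulator is just one standard cirq simulator that accepts a noise model.

import cirq
import cirq_google
from cirq.devices.noise_properties import NoiseModelFromNoiseProperties
from cirq_google.api import v2
from cirq_google.experimental.noise_models.calibration_to_noise_properties import (
    noise_properties_from_calibration,
)
from google.protobuf.text_format import Merge

# Build a tiny Calibration carrying a single RB Pauli error metric (illustrative value).
snapshot = Merge(
    """
    timestamp_ms: 1579214873,
    metrics: [{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['0_0'],
        values: [{
            double_val: 0.001
        }]
    }]
    """,
    v2.metrics_pb2.MetricsSnapshot(),
)
calibration = cirq_google.Calibration(snapshot)

# Calibration -> NoiseProperties -> NoiseModel, then attach the model to a simulator.
prop = noise_properties_from_calibration(calibration)
noise_model = NoiseModelFromNoiseProperties(prop)
simulator = cirq.DensityMatrixSimulator(noise=noise_model)

qubit = cirq.GridQubit(0, 0)
result = simulator.run(cirq.Circuit(cirq.X(qubit), cirq.measure(qubit)), repetitions=1000)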
cirq_google/experimental/noise_models/calibration_to_noise_properties_test.py
@@ -0,0 +1,283 @@
import pytest
import cirq_google
from cirq_google.api import v2
from cirq_google.experimental.noise_models.calibration_to_noise_properties import (
noise_properties_from_calibration,
)
from google.protobuf.text_format import Merge
import numpy as np


def test_noise_properties_from_calibration():
    xeb_1 = 0.999
    xeb_2 = 0.996

    p00_1 = 0.001
    p00_2 = 0.002
    p00_3 = 0.003

    t1_1 = 0.005
    t1_2 = 0.007
    t1_3 = 0.003

    _CALIBRATION_DATA = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'xeb',
        targets: ['0_0', '0_1'],
        values: [{{
            double_val: {xeb_1}
        }}]
    }}, {{
        name: 'xeb',
        targets: ['0_0', '1_0'],
        values: [{{
            double_val: {xeb_2}
        }}]
    }}, {{
        name: 'single_qubit_p00_error',
        targets: ['0_0'],
        values: [{{
            double_val: {p00_1}
        }}]
    }}, {{
        name: 'single_qubit_p00_error',
        targets: ['0_1'],
        values: [{{
            double_val: {p00_2}
        }}]
    }}, {{
        name: 'single_qubit_p00_error',
        targets: ['1_0'],
        values: [{{
            double_val: {p00_3}
        }}]
    }}, {{
        name: 'single_qubit_readout_separation_error',
        targets: ['0_0'],
        values: [{{
            double_val: .004
        }}]
    }}, {{
        name: 'single_qubit_readout_separation_error',
        targets: ['0_1'],
        values: [{{
            double_val: .005
        }}]
    }}, {{
        name: 'single_qubit_readout_separation_error',
        targets: ['1_0'],
        values: [{{
            double_val: .006
        }}]
    }}, {{
        name: 'single_qubit_idle_t1_micros',
        targets: ['0_0'],
        values: [{{
            double_val: {t1_1}
        }}]
    }}, {{
        name: 'single_qubit_idle_t1_micros',
        targets: ['0_1'],
        values: [{{
            double_val: {t1_2}
        }}]
    }}, {{
        name: 'single_qubit_idle_t1_micros',
        targets: ['1_0'],
        values: [{{
            double_val: {t1_3}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    # Create NoiseProperties object from Calibration
    calibration = cirq_google.Calibration(_CALIBRATION_DATA)
    prop = noise_properties_from_calibration(calibration)

    expected_t1_nanos = np.mean([t1_1, t1_2, t1_3]) * 1000
    expected_xeb_fidelity = np.mean([xeb_1, xeb_2])
    expected_p00 = np.mean([p00_1, p00_2, p00_3])

    assert np.isclose(prop.t1_ns, expected_t1_nanos)
    assert np.isclose(prop.xeb, expected_xeb_fidelity)
    assert np.isclose(prop.p00, expected_p00)


def test_from_calibration_rb():
    rb_pauli_1 = 0.001
    rb_pauli_2 = 0.002
    rb_pauli_3 = 0.003

    _CALIBRATION_DATA_RB = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['0_0'],
        values: [{{
            double_val: {rb_pauli_1}
        }}]
    }}, {{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['0_1'],
        values: [{{
            double_val: {rb_pauli_2}
        }}]
    }}, {{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['1_0'],
        values: [{{
            double_val: {rb_pauli_3}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    # Create NoiseProperties object from Calibration
    rb_calibration = cirq_google.Calibration(_CALIBRATION_DATA_RB)
    rb_noise_prop = noise_properties_from_calibration(rb_calibration)

    average_pauli_rb = np.mean([rb_pauli_1, rb_pauli_2, rb_pauli_3])
    assert np.isclose(average_pauli_rb, rb_noise_prop.pauli_error)


def test_validate_calibration():
    # RB Pauli error and RB Average Error disagree
    rb_pauli_error = 0.05
    rb_average_error = 0.1

    decay_constant_pauli = 1 - rb_pauli_error / (1 - 1 / 4)
    decay_constant_average = 1 - rb_average_error / (1 - 1 / 2)

    _CALIBRATION_DATA_PAULI_AVERAGE = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['0_0'],
        values: [{{
            double_val: {rb_pauli_error}
        }}]
    }}, {{
        name: 'single_qubit_rb_average_error_per_gate',
        targets: ['0_1'],
        values: [{{
            double_val: {rb_average_error}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    bad_calibration_pauli_average = cirq_google.Calibration(_CALIBRATION_DATA_PAULI_AVERAGE)
    with pytest.raises(
        ValueError,
        match=f'Decay constant from RB Pauli error: {decay_constant_pauli}, '
        f'decay constant from RB Average error: {decay_constant_average}. '
        'If validation is disabled, RB Pauli error will be used.',
    ):
        noise_properties_from_calibration(bad_calibration_pauli_average)

    assert np.isclose(
        noise_properties_from_calibration(
            bad_calibration_pauli_average, validate=False
        ).pauli_error,
        rb_pauli_error,
    )

    # RB Pauli Error and XEB Fidelity disagree
    xeb_fidelity = 0.99

    decay_constant_from_xeb = 1 - (1 - xeb_fidelity) / (1 - 1 / 4)

    _CALIBRATION_DATA_PAULI_XEB = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'single_qubit_rb_pauli_error_per_gate',
        targets: ['0_0'],
        values: [{{
            double_val: {rb_pauli_error}
        }}]
    }}, {{
        name: 'xeb',
        targets: ['0_0', '1_0'],
        values: [{{
            double_val: {xeb_fidelity}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    bad_calibration_pauli_xeb = cirq_google.Calibration(_CALIBRATION_DATA_PAULI_XEB)
    with pytest.raises(
        ValueError,
        match=f'Decay constant from RB Pauli error: {decay_constant_pauli}, '
        f'decay constant from XEB Fidelity: {decay_constant_from_xeb}. '
        'If validation is disabled, RB Pauli error will be used.',
    ):
        noise_properties_from_calibration(bad_calibration_pauli_xeb)

    # RB Average Error and XEB Fidelity disagree
    _CALIBRATION_DATA_AVERAGE_XEB = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'single_qubit_rb_average_error_per_gate',
        targets: ['0_0'],
        values: [{{
            double_val: {rb_average_error}
        }}]
    }}, {{
        name: 'xeb',
        targets: ['0_0', '1_0'],
        values: [{{
            double_val: {xeb_fidelity}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    bad_calibration_average_xeb = cirq_google.Calibration(_CALIBRATION_DATA_AVERAGE_XEB)
    with pytest.raises(
        ValueError,
        match=f'Decay constant from RB Average error: {decay_constant_average}, '
        f'decay constant from XEB Fidelity: {decay_constant_from_xeb}. '
        'If validation is disabled, XEB Fidelity will be used.',
    ):
        noise_properties_from_calibration(bad_calibration_average_xeb)

    assert np.isclose(
        noise_properties_from_calibration(bad_calibration_average_xeb, validate=False).xeb,
        xeb_fidelity,
    )

    # Calibration data with no RB error or XEB fidelity
    t1 = 2.0  # microseconds

    _CALIBRATION_DATA_T1 = Merge(
        f"""
    timestamp_ms: 1579214873,
    metrics: [{{
        name: 'single_qubit_idle_t1_micros',
        targets: ['0_0'],
        values: [{{
            double_val: {t1}
        }}]
    }}]
    """,
        v2.metrics_pb2.MetricsSnapshot(),
    )

    calibration_t1 = cirq_google.Calibration(_CALIBRATION_DATA_T1)

    assert np.isclose(noise_properties_from_calibration(calibration_t1).t1_ns, t1 * 1000)
