418 changes: 202 additions & 216 deletions docs/tutorials/rb_example.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions qiskit_experiments/analysis/curve_fitting.py
@@ -223,11 +223,11 @@ def multi_curve_fit(
wsigma[idxs[i]] = sigma[idxs[i]] / np.sqrt(weights[i])

# Define multi-objective function
def f(x, *params):
def f(x, *args, **kwargs):
y = np.zeros(x.size)
for i in range(num_funcs):
xi = x[idxs[i]]
yi = funcs[i](xi, *params)
yi = funcs[i](xi, *args, **kwargs)
y[idxs[i]] = yi
return y

4 changes: 2 additions & 2 deletions qiskit_experiments/base_analysis.py
@@ -68,8 +68,8 @@ def run(
try:
analysis_results, figures = self._run_analysis(experiment_data, **options)
analysis_results["success"] = True
except Exception:
analysis_results = AnalysisResult(success=False)
except Exception as ex:
analysis_results = AnalysisResult(success=False, error_message=ex)
figures = None

# Save to experiment data
148 changes: 93 additions & 55 deletions qiskit_experiments/randomized_benchmarking/interleaved_rb_analysis.py
@@ -18,11 +18,12 @@
process_multi_curve_data,
multi_curve_fit,
)
from qiskit_experiments.analysis import plotting
from qiskit_experiments.analysis.data_processing import (
level2_probability,
multi_mean_xy_data,
)
from qiskit_experiments.analysis import plotting

from .rb_analysis import RBAnalysis


@@ -37,11 +38,10 @@ class InterleavedRBAnalysis(RBAnalysis):

The error bounds are given by
:math:`E=\min\left\{ \begin{array}{c}
\frac{\left(d-1\right)\left[\left|p-p_{\overline{\mathcal{C}}}/p\right|+\left(1-p\right)\right]}{d}\\
\frac{\left(d-1\right)\left[\left|p-p_{\overline{\mathcal{C}}}\right|+\left(1-p\right)\right]}{d}\\
ShellyGarion (Contributor) commented on May 18, 2021:
Does the systematic error bounds calculation remain the same in this case? We need to note that the notation here is not the same as in the original interleaved RB paper, namely that p_C here denotes p_C/p.

Collaborator (author) replied:
I'm not 100% sure, I was hoping you or @gadial could double-check this.

Contributor replied:
It looks OK to me, but maybe it's better to ask one of the co-authors of the original paper? (One should also note that the notation differs from the original paper.)
\frac{2\left(d^{2}-1\right)\left(1-p\right)}{pd^{2}}+\frac{4\sqrt{1-p}\sqrt{d^{2}-1}}{p}
\end{array}\right.`
"""

# pylint: disable=invalid-name
def _run_analysis(
self,
@@ -50,37 +50,29 @@ def _run_analysis(
plot: bool = True,
ax: Optional["matplotlib.axes.Axes"] = None,
):

data = experiment_data.data()
num_qubits = len(data[0]["metadata"]["qubits"])

# Process data
def data_processor(datum):
return level2_probability(datum, datum["metadata"]["ylabel"])
Collaborator commented:
I think having the outcome label in the circuit metadata is reasonable. I assumed the processor options could be obtained from the metadata without extra data processing. Do you prefer having such extra data-processing capability to generate data processor options?
https://github.com/Qiskit/qiskit-experiments/blob/1f64b4236864d28a7682da44e72f2bab2242013e/qiskit_experiments/analysis/curve_analysis.py#L367-L374

Contributor replied:
I prefer having it as part of the metadata as well, but have no strong feelings about it (note that if we change this we can remove this metadata from rb_experiment).

Collaborator (author) replied:
It seemed redundant to add the measurement label to the metadata for every circuit when it's always the same and only depends on the number of qubits. Maybe it needs to be added if this is later subclassed from a generic curve fit experiment.

Collaborator replied:
So we naively assume all 1 probability here (if the outcome is not specified in the metadata)? I'm fine with this approach.
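As a rough illustration of the alternative discussed in this thread (reading the outcome label from the circuit metadata when present, otherwise falling back to the all-zeros bitstring), a hypothetical processor could look like the sketch below. This is not the code this PR merges, and the `ylabel` metadata key is only an assumption carried over from the removed line above:

```python
from qiskit_experiments.analysis.data_processing import level2_probability

def data_processor(datum, num_qubits):
    # Prefer an explicit outcome label from the circuit metadata;
    # otherwise fall back to the all-zeros bitstring.
    outcome = datum["metadata"].get("ylabel", num_qubits * "0")
    return level2_probability(datum, outcome)
```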

return level2_probability(datum, num_qubits * "0")

num_qubits = len(experiment_data.data[0]["metadata"]["qubits"])
series, x, y, sigma = process_multi_curve_data(experiment_data.data, data_processor)
series, xdata, ydata, ydata_sigma = multi_mean_xy_data(series, x, y, sigma)
# Raw data for each sample
series_raw, x_raw, y_raw, sigma_raw = process_multi_curve_data(data, data_processor)

def fit_fun_standard(x, a, alpha_std, _, b):
return a * alpha_std ** x + b
# Data averaged over samples
series, xdata, ydata, ydata_sigma = multi_mean_xy_data(series_raw, x_raw, y_raw, sigma_raw)

def fit_fun_interleaved(x, a, _, alpha_int, b):
return a * alpha_int ** x + b
# pylint: disable = unused-argument
def fit_fun_standard(x, a, alpha, alpha_c, b):
return a * alpha ** x + b

std_idx = series == 0
std_xdata = xdata[std_idx]
std_ydata = ydata[std_idx]
std_ydata_sigma = ydata_sigma[std_idx]
p0_std = self._p0(std_xdata, std_ydata, num_qubits)
def fit_fun_interleaved(x, a, alpha, alpha_c, b):
return a * (alpha * alpha_c) ** x + b

int_idx = series == 1
int_xdata = xdata[int_idx]
int_ydata = ydata[int_idx]
int_ydata_sigma = ydata_sigma[int_idx]
p0_int = self._p0(int_xdata, int_ydata, num_qubits)

p0 = (
np.mean([p0_std[0], p0_int[0]]),
p0_std[1],
p0_int[1],
np.mean([p0_std[2], p0_int[2]]),
)
p0 = self._p0_multi(series, xdata, ydata, num_qubits)
bounds = {"a": [0, 1], "alpha": [0, 1], "alpha_c": [0, 1], "b": [0, 1]}

analysis_result = multi_curve_fit(
[fit_fun_standard, fit_fun_interleaved],
@@ -89,51 +81,97 @@ def fit_fun_interleaved(x, a, _, alpha_int, b):
ydata,
p0,
ydata_sigma,
bounds=([0, 0, 0, 0], [1, 1, 1, 1]),
bounds=bounds,
)

# Add EPC data
nrb = 2 ** num_qubits
scale = (nrb - 1) / (2 ** nrb)
scale = (nrb - 1) / nrb
_, alpha, alpha_c, _ = analysis_result["popt"]
_, alpha_err, alpha_c_err, _ = analysis_result["popt_err"]
_, _, alpha_c_err, _ = analysis_result["popt_err"]

# Calculate epc_est (=r_c^est) - Eq. (4):
epc_est = scale * (1 - alpha_c / alpha)
epc_est = scale * (1 - alpha_c)
epc_est_err = scale * alpha_c_err
analysis_result["EPC"] = epc_est
analysis_result["EPC_err"] = epc_est_err

# Calculate the systematic error bounds - Eq. (5):
systematic_err_1 = scale * (abs(alpha - alpha_c / alpha) + (1 - alpha))
systematic_err_1 = scale * (abs(alpha - alpha_c) + (1 - alpha))
systematic_err_2 = (
2 * (nrb * nrb - 1) * (1 - alpha) / (alpha * nrb * nrb)
+ 4 * (np.sqrt(1 - alpha)) * (np.sqrt(nrb * nrb - 1)) / alpha
)
systematic_err = min(systematic_err_1, systematic_err_2)
systematic_err_l = epc_est - systematic_err
systematic_err_r = epc_est + systematic_err
analysis_result["EPC_systematic_err"] = systematic_err
analysis_result["EPC_systematic_bounds"] = [max(systematic_err_l, 0), systematic_err_r]

if plot and plotting.HAS_MATPLOTLIB:
ax = plotting.plot_curve_fit(fit_fun_standard, analysis_result, ax=ax, color="blue")
ax = plotting.plot_curve_fit(
fit_fun_interleaved,
analysis_result,
ax=ax,
color="green",
)
ax = self._generate_multi_scatter_plot(series_raw, x_raw, y_raw, ax=ax)
ax = self._generate_multi_errorbar_plot(series, xdata, ydata, ydata_sigma, ax=ax)
self._format_plot(ax, analysis_result)
ax.legend(loc="center right")
figures = [ax.get_figure()]
else:
figures = None
return analysis_result, figures

@staticmethod
def _generate_multi_scatter_plot(series, xdata, ydata, ax):
Contributor commented:
Note that the points of the standard and interleaved data are denoted the same (green x); only the fitting curves and the mean error bars have separate colors.

"""Generate scatter plot of raw data"""
idx0 = series == 0
idx1 = series == 1
ax = plotting.plot_scatter(xdata[idx0], ydata[idx0], ax=ax)
ax = plotting.plot_scatter(xdata[idx1], ydata[idx1], ax=ax, marker="+", c="darkslategrey")
return ax

alpha_err_sq = (alpha_err / alpha) ** 2
alpha_c_err_sq = (alpha_c_err / alpha_c) ** 2
epc_est_err = (
((nrb - 1) / nrb) * (alpha_c / alpha) * (np.sqrt(alpha_err_sq + alpha_c_err_sq))
@staticmethod
def _generate_multi_errorbar_plot(series, xdata, ydata, sigma, ax):
"""Generate errorbar plot of average data"""
idx0 = series == 0
idx1 = series == 1
ax = plotting.plot_errorbar(
xdata[idx0],
ydata[idx0],
sigma[idx0],
ax=ax,
label="Standard",
marker=".",
color="red",
)
ax = plotting.plot_errorbar(
xdata[idx1],
ydata[idx1],
sigma[idx1],
ax=ax,
label="Interleaved",
marker="^",
color="orange",
)
return ax

analysis_result["EPC"] = epc_est
analysis_result["EPC_err"] = epc_est_err
analysis_result["systematic_err"] = systematic_err
analysis_result["systematic_err_L"] = systematic_err_l
analysis_result["systematic_err_R"] = systematic_err_r
analysis_result["plabels"] = ["A", "alpha", "alpha_c", "B"]

if plot:
ax = plotting.plot_curve_fit(fit_fun_standard, analysis_result, ax=ax)
ax = plotting.plot_curve_fit(fit_fun_interleaved, analysis_result, ax=ax)
ax = plotting.plot_scatter(std_xdata, std_ydata, ax=ax)
ax = plotting.plot_scatter(int_xdata, int_ydata, ax=ax)
ax = plotting.plot_errorbar(std_xdata, std_ydata, std_ydata_sigma, ax=ax)
ax = plotting.plot_errorbar(int_xdata, int_ydata, int_ydata_sigma, ax=ax)
self._format_plot(ax, analysis_result)
analysis_result.plt = plotting.pyplot

return analysis_result, None
@staticmethod
def _p0_multi(series, xdata, ydata, num_qubits):
"""Initial guess for the fitting function"""
std_idx = series == 0
p0_std = RBAnalysis._p0(xdata[std_idx], ydata[std_idx], num_qubits)
int_idx = series == 1
p0_int = RBAnalysis._p0(xdata[int_idx], ydata[int_idx], num_qubits)
return {
"a": np.mean([p0_std["a"], p0_int["a"]]),
"alpha": p0_std["alpha"],
"alpha_c": min(p0_int["alpha"] / p0_std["alpha"], 1),
"b": np.mean([p0_std["b"], p0_int["b"]]),
}

@classmethod
def _format_plot(cls, ax, analysis_result, add_label=True):
nkanazawa1989 (Collaborator) commented on May 18, 2021:
Do you think the fit values are needed now? In the old Ignis implementation this was kind of necessary because the fit values were protected members, but now these values can be seen in the repr of the analysis result object.

Collaborator (author) replied:
I think it's nice to keep the info in the figure so you can get the main details without having to also look at the result.

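As a rough illustration of the point about keeping the key fit results visible on the figure itself, an annotation along these lines could be placed on the axes. This is only a plain matplotlib sketch, not the PR's actual `_format_plot` implementation:

```python
import matplotlib.pyplot as plt

def annotate_fit_summary(ax, epc, epc_err):
    """Place a small text box with the fitted EPC value on the plot."""
    ax.text(
        0.6,
        0.9,
        f"EPC = {epc:.2e} ± {epc_err:.2e}",
        transform=ax.transAxes,
        bbox=dict(facecolor="white", alpha=0.8),
    )
    return ax

# Example usage on a fresh axes object
fig, ax = plt.subplots()
annotate_fit_summary(ax, 1.2e-3, 3.0e-4)
```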
2 changes: 1 addition & 1 deletion qiskit_experiments/randomized_benchmarking/rb_analysis.py
@@ -60,7 +60,7 @@ def data_processor(datum):
return level2_probability(datum, num_qubits * "0")

# Raw data for each sample
x_raw, y_raw, sigma_raw = process_curve_data(data, data_processor, x_key="xdata")
x_raw, y_raw, sigma_raw = process_curve_data(data, data_processor)

# Data averaged over samples
xdata, ydata, ydata_sigma = mean_xy_data(x_raw, y_raw, sigma_raw, method="sample")