
Commit

Merge 2a54499 into b7ae604
thangleiter committed Jun 19, 2020
2 parents b7ae604 + 2a54499 commit d4c94ae
Showing 21 changed files with 992 additions and 520 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -46,7 +46,7 @@ Y2.cache_filter_function(omega)
X.cache_filter_function(omega)

hadamard = Y2 @ X # equivalent: ff.concatenate((Y2, X))
hadamard.is_cached('F')
hadamard.is_cached('filter function')
# True (filter function cached during concatenation)
```


Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion doc/source/examples/examples.rst
@@ -8,7 +8,7 @@ This directory contains static examples that can also be run interactively from
getting_started
periodic_driving
advanced_concatenation
calculating_error_transfer_matrices
calculating_quantum_processes
extending_pulses
qutip_integration
quantum_fourier_transform
2 changes: 1 addition & 1 deletion doc/source/examples/getting_started.ipynb
@@ -31,7 +31,7 @@
" {H}_n &= \\sum_\\alpha s_\\alpha(t) b_\\alpha(t) B_\\alpha \\\\\n",
"\\end{align*}\n",
"\n",
"where $A_i$ and $B_\\alpha$ are the control and noise operators, respectively, $a_i(t)$ the control strength of $A_i$ at time $t$, $s_\\alpha(t)$ the sensitivity at time $t$ of the system to noise source $\\alpha$, and $b_\\alpha(t)$ classically fluctuating noise variables. Since the noise is captured in a spectral density function that accounts for different realizations of the random noise, the $b_\\alpha(t)$ are not required at instantiation of a `PulseSequence` instance. Note that we always calculate in units where $\\hbar\\equiv 1$.\n",
"where $A_i$ and $B_\\alpha$ are the control and noise operators, respectively, $a_i(t)$ the control strength of $A_i$ at time $t$, $s_\\alpha(t)$ a deterministic time dependence of the noise operators to model, for instance, non-linear coupling to the noise sources, and $b_\\alpha(t)$ classically fluctuating noise variables. Since the noise is captured in a spectral density function that accounts for different realizations of the random noise, the $b_\\alpha(t)$ are not required at instantiation of a `PulseSequence` instance. Note that we always calculate in units where $\\hbar\\equiv 1$.\n",
"\n",
"The `PulseSequence` class requires three positional arguments at instantiation; `H_c`, `H_n`, and `dt`, with `dt` the time-deltas of piece-wise constant control. The former two represent the control and noise Hamiltonians and are passed in the same nested-list-of-lists structure of operators and coefficient lists similar to that required by [QuTiP](http://qutip.org/) (the difference being that QuTiP requires implicit functions for calculating the coefficients instead of explicit values). Optionally, we pass unique identifiers for each operator as a third element of the list. That is, \n",
"\n",
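The constructor description in the hunk above translates directly into code. Below is a minimal, hedged sketch (not part of this commit) of instantiating a `PulseSequence` from `H_c`, `H_n`, and `dt` in the nested-list-of-lists format; the π/2 rotation, the dephasing noise operator, and the frequency grid are illustrative assumptions only.

```python
import numpy as np
import qutip as qt
import filter_functions as ff

# Control Hamiltonian: one segment driving sigma_x/2 with amplitude pi/2,
# given as (operator, per-segment coefficients, identifier).
H_c = [[qt.sigmax().full()/2, [np.pi/2], 'X']]
# Noise Hamiltonian: dephasing via sigma_z/2 with unit sensitivity s_Z(t) = 1.
H_n = [[qt.sigmaz().full()/2, [1], 'Z']]
# Durations of the piecewise-constant segments.
dt = [1]

pulse = ff.PulseSequence(H_c, H_n, dt)
omega = np.logspace(-2, 2, 200)        # assumed frequency grid for sampling
F = pulse.get_filter_function(omega)   # filter function evaluated at omega
```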
22 changes: 11 additions & 11 deletions doc/source/examples/periodic_driving.ipynb
@@ -183,7 +183,7 @@
{
"data": {
"text/plain": [
"<matplotlib.legend.Legend at 0x7fb642ac4fa0>"
"<matplotlib.legend.Legend at 0x220172b7490>"
]
},
"execution_count": 6,
@@ -249,7 +249,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "30b23e89df10479c8c339c7abd7c19ff",
"model_id": "04190ea704d242d58228b8357a015184",
"version_major": 2,
"version_minor": 0
},
@@ -296,16 +296,16 @@
"output_type": "stream",
"text": [
"===========================================\n",
"ATOMIC initialization\t\t : 0.0013 s\n",
"ATOMIC filter function\t\t : 0.0395 s\n",
"NOT concatenation (periodic)\t : 0.0837 s\n",
"NOT concatenation (standard)\t : 5.5189 s\n",
"ECHO concatenation\t\t : 0.0384 s\n",
"ATOMIC initialization\t\t : 0.0018 s\n",
"ATOMIC filter function\t\t : 0.0354 s\n",
"NOT concatenation (periodic)\t : 0.1257 s\n",
"NOT concatenation (standard)\t : 5.8858 s\n",
"ECHO concatenation\t\t : 0.0454 s\n",
"-------------------------------------------\n",
"Total (periodic)\t\t : 0.1628 s\n",
"Total (standard)\t\t : 5.5980 s\n",
"Total (periodic)\t\t : 0.2083 s\n",
"Total (standard)\t\t : 5.9684 s\n",
"===========================================\n",
"Total (brute force)\t\t : 148.03 s\n",
"Total (brute force)\t\t : 242.65 s\n",
"===========================================\n"
]
}
@@ -353,7 +353,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.1"
"version": "3.8.3"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
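The timing table in the hunk above contrasts periodic with standard concatenation. As a rough, hedged sketch of what is being compared (the atomic pulse and the repetition count are assumptions, not the notebook's actual values), repeating a single cached building block with `concatenate_periodic` avoids recomputing the filter function segment by segment:

```python
import numpy as np
import qutip as qt
import filter_functions as ff

# Illustrative atomic pulse: one pi/2 X-rotation with dephasing noise.
atomic = ff.PulseSequence(
    [[qt.sigmax().full()/2, [np.pi/2], 'X']],
    [[qt.sigmaz().full()/2, [1], 'Z']],
    [1]
)
omega = np.logspace(-2, 2, 200)
atomic.cache_filter_function(omega)            # cache once, as in the README snippet

repeats = 100
standard = ff.concatenate([atomic]*repeats)          # cost grows with the number of segments
periodic = ff.concatenate_periodic(atomic, repeats)  # reuses the single cached block
```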
2 changes: 1 addition & 1 deletion doc/source/examples/quantum_fourier_transform.ipynb
@@ -1094,7 +1094,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.1"
"version": "3.8.3"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
2 changes: 1 addition & 1 deletion doc/source/examples/qutip_integration.ipynb
@@ -138,7 +138,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.1"
"version": "3.8.3"
}
},
"nbformat": 4,
3 changes: 2 additions & 1 deletion examples/qft.py
@@ -34,6 +34,7 @@
import filter_functions as ff
import qutip as qt
from qutip import qip
from qutip.qip.algorithms.qft import qft as qt_qft

# %% Define some functions

@@ -147,7 +148,7 @@ def QFT_pulse(N: int = 4, tau: float = 1):
prop = ff.util.mdot(swaps) @ QFT.total_Q
qt.matrix_histogram_complex(prop)
print('Correct action: ',
ff.util.oper_equiv(prop, qip.algorithms.qft.qft(N), eps=1e-14))
ff.util.oper_equiv(prop, qt_qft(N), eps=1e-14))

fig, ax, _ = ff.plot_filter_function(QFT, omega)
# Move the legend to the side because of many entries
87 changes: 46 additions & 41 deletions examples/randomized_benchmarking.py
@@ -23,6 +23,7 @@
between gates captured in the filter functions.
"""
import time
from typing import Dict, Sequence

import numpy as np
import qutip as qt
@@ -37,15 +38,15 @@
# %%


def fitfun(m, A, B):
return A*m + B
def fitfun(m, A):
return 1 - A*m


def state_infidelity(pulse: PulseSequence, S: ndarray, omega: ndarray,
ind: int = 2) -> float:
ind: int = 3) -> float:
"""Compute state infidelity for input state eigenstate of pauli *ind*"""
R = pulse.get_control_matrix(omega)
F = np.einsum('jko->jo', util.abs2(R[:, np.delete([0, 1, 2], ind)]))
F = np.einsum('jko->jo', util.abs2(R[:, np.delete([0, 1, 2, 3], ind)]))
return np.trapz(F*S, omega)/(2*np.pi*pulse.d)


@@ -68,8 +69,10 @@ def find_inverse(U: ndarray) -> ndarray:


def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
omega):
infidelities = np.empty((N_l, N_G), dtype=float)
alpha: Sequence[float],
spectra: Dict[float, Sequence[float]],
omega: Sequence[float]):
infidelities = {a: np.empty((N_l, N_G), dtype=float) for a in alpha}
lengths = np.round(np.linspace(min_l, max_l, N_l)).astype(int)
delta_t = []
t_now = [time.perf_counter()]
@@ -85,9 +88,10 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
U = ff.concatenate(cliffords[randints])
U_inv = find_inverse(U.total_Q)
pulse_sequence = U @ U_inv
infidelities[l, j] = state_infidelity(
pulse_sequence, S, omega
).sum()
for k, a in enumerate(alpha):
infidelities[a][l, j] = state_infidelity(
pulse_sequence, spectra[a], omega
).sum()

return infidelities, delta_t

@@ -97,17 +101,17 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
H_n = {}
H_c = {}
dt = {}
H_c['Id'] = [[qt.sigmax().full(), [0], 'X']]
H_n['Id'] = [[qt.sigmax().full(), [1], 'X'],
[qt.sigmay().full(), [1], 'Y']]
H_c['Id'] = [[qt.sigmax().full()/2, [0], 'X']]
H_n['Id'] = [[qt.sigmax().full()/2, [1], 'X'],
[qt.sigmay().full()/2, [1], 'Y']]
dt['Id'] = [T]
H_c['X2'] = [[qt.sigmax().full(), [np.pi/4/T], 'X']]
H_n['X2'] = [[qt.sigmax().full(), [1], 'X'],
[qt.sigmay().full(), [1], 'Y']]
H_c['X2'] = [[qt.sigmax().full()/2, [np.pi/2/T], 'X']]
H_n['X2'] = [[qt.sigmax().full()/2, [1], 'X'],
[qt.sigmay().full()/2, [1], 'Y']]
dt['X2'] = [T]
H_c['Y2'] = [[qt.sigmay().full(), [np.pi/4/T], 'Y']]
H_n['Y2'] = [[qt.sigmax().full(), [1], 'X'],
[qt.sigmay().full(), [1], 'Y']]
H_c['Y2'] = [[qt.sigmay().full()/2, [np.pi/2/T], 'Y']]
H_n['Y2'] = [[qt.sigmax().full()/2, [1], 'X'],
[qt.sigmay().full()/2, [1], 'Y']]
dt['Y2'] = [T]

# %% Set up PulseSequences
@@ -134,7 +138,7 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
# %% Construct Clifford group
tic = time.perf_counter()
cliffords = np.array([
Id, # Id
Y2 @ Y2 @ Y2 @ Y2, # Id
X2 @ X2, # X
Y2 @ Y2, # Y
Y2 @ Y2 @ X2 @ X2, # Z
@@ -166,8 +170,10 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
# %% Run simulation

eps0 = 2.7241e-4
# Scaling factor for the noise so that alpha = 0 and alpha = 0.7 have the same
# power

alpha = (0.0, 0.7)
# Scaling factor for the noise so that alpha = 0 and alpha = 0.7 give the same
# average clifford fidelity
noise_scaling_factor = {
0.0: 0.4415924985735799,
0.7: 1
@@ -176,37 +182,36 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
state_infidelities = {}
clifford_infidelities = {}

for i, alpha in enumerate((0.0, 0.7)):
S0 = 1e-13*(2*np.pi*1e-3)**alpha/eps0**2*noise_scaling_factor[alpha]
S = S0/omega**alpha
spectra = {}
for i, a in enumerate(alpha):
S0 = 4e-11*(2*np.pi*1e-3)**a/eps0**2*noise_scaling_factor[a]
S = S0/omega**a
spectra[a], omega_twosided = util.symmetrize_spectrum(S, omega)

# Need to calculate with two-sided spectra
clifford_infidelities[alpha] = [
ff.infidelity(C, *util.symmetrize_spectrum(S, omega)).sum()
clifford_infidelities[a] = [
ff.infidelity(C, spectra[a], omega_twosided).sum()
for C in cliffords
]

print('=============================================')
print('\t\talpha = {}'.format(alpha))
print('=============================================')
state_infidelities[alpha], exec_times = run_randomized_benchmarking(
N_G, N_l, m_min, m_max, omega
)
state_infidelities, exec_times = run_randomized_benchmarking(
N_G, N_l, m_min, m_max, alpha, spectra, omega_twosided
)

# %% Plot results
fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(8, 3))

fidelities = {alpha: 1 - infid for alpha, infid in state_infidelities.items()}
for i, alpha in enumerate((0.0, 0.7)):
fidelities = {a: 1 - infid for a, infid in state_infidelities.items()}
for i, a in enumerate(alpha):

means = np.mean(fidelities[alpha], axis=1)
stds = np.std(fidelities[alpha], axis=1)
means = np.mean(fidelities[a], axis=1)
stds = np.std(fidelities[a], axis=1)

popt, pcov = optimize.curve_fit(fitfun, lengths, means, [0, 1], stds,
popt, pcov = optimize.curve_fit(fitfun, lengths, means, [0], stds,
absolute_sigma=True)

for j in range(N_G):
fid = ax[i].plot(lengths, fidelities[alpha][:, j], 'k.', alpha=0.1,
fid = ax[i].plot(lengths, fidelities[a][:, j], 'k.', alpha=0.1,
zorder=2)

mean = ax[i].errorbar(lengths, means, stds, fmt='.', zorder=3,
@@ -217,16 +222,16 @@ def run_randomized_benchmarking(N_G: int, N_l: int, min_l: int, max_l: int,
# sequence length and r = 1 - F_avg = d/(d + 1)*(1 - F_ent) the average
# error per gate
exp = ax[i].plot(lengths,
1 - np.mean(clifford_infidelities[alpha])*lengths*2/3,
1 - np.mean(clifford_infidelities[a])*lengths*2/3,
'--', zorder=4, color='tab:blue')
ax[i].set_title(r'$\alpha = {}$'.format(alpha))
ax[i].set_title(r'$\alpha = {}$'.format(a))
ax[i].set_xlabel(r'Sequence length $m$')

handles = [fid[0], mean[0], fit[0], exp[0]]
labels = ['State Fidelity', 'Fidelity mean', 'Fit',
'RB theory w/o pulse correlations']
ax[0].set_xlim(0, max(lengths))
ax[0].set_ylim(.993, 1)
# ax[0].set_ylim(.9, 1)
ax[0].set_ylabel(r'Survival Probability')
ax[0].legend(frameon=False, handles=handles, labels=labels)

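The comment in the last hunk relates the dashed theory curve to the average error per gate, r = d/(d + 1)(1 - F_ent). A small, hedged sketch of that bookkeeping for a qubit (d = 2), using a placeholder Clifford infidelity rather than a value from the actual run:

```python
import numpy as np

d = 2
mean_clifford_infidelity = 1e-4               # placeholder, not from the simulation
# Average error per gate from the entanglement infidelity, r = d/(d+1)*(1 - F_ent).
r = d / (d + 1) * mean_clifford_infidelity
m = np.arange(1, 101)                         # sequence lengths
# First-order decay of the survival probability, matching fitfun(m, A) = 1 - A*m.
survival_theory = 1 - r * m
```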
4 changes: 2 additions & 2 deletions filter_functions/__init__.py
@@ -25,7 +25,7 @@
from .numeric import (error_transfer_matrix, infidelity,
liouville_representation)
from .plotting import (
plot_bloch_vector_evolution, plot_error_transfer_matrix,
plot_bloch_vector_evolution, plot_cumulant_function,
plot_filter_function, plot_pulse_correlation_filter_function,
plot_pulse_train)
from .pulse_sequence import (PulseSequence, concatenate, concatenate_periodic,
@@ -34,7 +34,7 @@
__all__ = ['Basis', 'PulseSequence', 'analytic', 'basis', 'concatenate',
'concatenate_periodic', 'error_transfer_matrix', 'extend',
'infidelity', 'liouville_representation', 'numeric',
'plot_bloch_vector_evolution', 'plot_error_transfer_matrix',
'plot_bloch_vector_evolution', 'plot_cumulant_function',
'plot_filter_function', 'plot_pulse_correlation_filter_function',
'plot_pulse_train', 'plotting', 'pulse_sequence', 'remap', 'util']

2 changes: 1 addition & 1 deletion filter_functions/basis.py
@@ -619,7 +619,7 @@ def expand(M: Union[ndarray, Basis], basis: Union[ndarray, Basis],
{\mathrm{tr}\big(C_j^\dagger C_j\big)}.
"""
coefficients = np.einsum('...ij,bji->...b', np.asarray(M), basis)
coefficients = np.tensordot(M, basis, axes=[(-2, -1), (-1, -2)])

if not normalized:
coefficients /= np.einsum('bij,bji->b', basis, basis).real
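The only functional change in `expand` swaps an `einsum` contraction for an equivalent `tensordot` call. A hedged sanity check (not part of the commit) that the two lines shown in the hunk compute the same coefficients for a stack of matrices:

```python
import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((5, 2, 2)) + 1j*rng.standard_normal((5, 2, 2))
basis = rng.standard_normal((4, 2, 2)) + 1j*rng.standard_normal((4, 2, 2))

# Contract the matrix indices of M against the transposed matrix indices of
# each basis element: coefficients[..., b] = sum_ij M[..., i, j]*basis[b, j, i].
via_einsum = np.einsum('...ij,bji->...b', M, basis)
via_tensordot = np.tensordot(M, basis, axes=[(-2, -1), (-1, -2)])
assert np.allclose(via_einsum, via_tensordot)
```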