-
Notifications
You must be signed in to change notification settings - Fork 55
/
rbtools.py
253 lines (188 loc) · 7.08 KB
/
rbtools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
"""
Tools for analyzing RB data
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
def p_to_r(p, d, rtype='EI'):
    """
    Converts an RB decay constant (`p`) to the RB error rate (`r`).

    Here `p` is (normally) obtained from fitting data to `A + Bp^m`. There are
    two 'types' of RB error rate, corresponding to different rescalings of
    `1 - p`: the entanglement infidelity (EI) convention,

        `r = (d^2 - 1)(1 - p)/d^2`,

    and the average gate infidelity (AGI) convention,

        `r = (d - 1)(1 - p)/d`,

    where `d` is the Hilbert space dimension (2^n for n qubits). For RB in
    which every gate is followed by an n-qubit uniform depolarizing channel
    (the most idealized RB scenario), the EI-type (AGI-type) `r` corresponds
    to the EI (AGI) of that depolarizing channel to the identity. The default
    (EI) is the convention used in direct RB and corresponds to the error
    probability of the gates in the idealized Pauli-errors scenario; AGI is
    the convention used throughout Clifford RB theory.

    Parameters
    ----------
    p : float
        Fit parameter p from P_m = A + B*p**m.

    d : int
        Number of dimensions of the Hilbert space

    rtype : {'EI','AGI'}, optional
        The RB error rate rescaling convention.

    Returns
    -------
    r : float
        The RB error rate
    """
    # Dispatch on the convention; reject anything unrecognized up front.
    if rtype == 'EI':
        return (d**2 - 1) * (1 - p) / d**2
    if rtype == 'AGI':
        return (1 - p) * (d - 1) / d
    raise ValueError("rtype must be `EI` (for entanglement infidelity) or `AGI` (for average gate infidelity)")
def r_to_p(r, d, rtype='EI'):
    """
    Inverse of the p_to_r function: converts an RB error rate (`r`) back to
    the RB decay constant (`p`), under the chosen rescaling convention.

    Parameters
    ----------
    r : float
        The RB error rate

    d : int
        Number of dimensions of the Hilbert space

    rtype : {'EI','AGI'}, optional
        The RB error rate rescaling convention.

    Returns
    -------
    p : float
        The RB decay constant
    """
    # Invert the EI / AGI rescaling applied by p_to_r; unknown conventions are an error.
    if rtype == 'EI':
        return 1 - d**2 * r / (d**2 - 1)
    if rtype == 'AGI':
        return 1 - d * r / (d - 1)
    raise ValueError("rtype must be `EI` (for entanglement infidelity) or `AGI` (for average gate infidelity)")
def adjusted_success_probability(hamming_distance_pdf):
    """
    The Hamming-distance-adjusted success probability.

    Computes `sum_n (-1/2)^n * h_n`, where `h_n` is entry `n` of
    `hamming_distance_pdf`, i.e. the probability of observing an outcome at
    Hamming distance `n` from the target bit string. Outcomes at distance 0
    contribute with weight 1, and farther outcomes contribute with
    geometrically shrinking, alternating-sign weights.

    Parameters
    ----------
    hamming_distance_pdf : list or numpy.ndarray
        The probability, indexed by Hamming distance `n`, of observing an
        outcome at distance `n` from the target outcome. Presumably sums
        to 1 (a PDF) — not checked here.

    Returns
    -------
    float
        The adjusted success probability.
    """
    # Weighted sum over Hamming distances with weight (-1/2)^n.
    adjSP = _np.sum([(-1 / 2)**n * hd_prob for n, hd_prob in enumerate(hamming_distance_pdf)])
    return adjSP
def marginalized_success_counts(dsrow, circ, target, qubits):
    """
    Marginalize the success counts over qubits.

    Sums the counts of every observed outcome that agrees with `target` on
    the retained `qubits` (other qubits' outcomes are ignored).

    Parameters
    ----------
    dsrow : _DataSetRow
        The circuit outcome data to marginalize.

    circ : Circuit
        The circuit.

    target : str
        The ideal outcome, e.g. `"0010"`.

    qubits : tuple
        The qubit labels that are retained after the marginalization.

    Returns
    -------
    int
        The number of success counts.
    """
    if dsrow.total == 0:
        return 0  # no data at all for this circuit

    # Positions, within the outcome bit strings, of the retained qubits.
    indices = [circ.line_labels.index(q) for q in qubits]
    # The target outcome restricted to the retained qubits; ordering matches
    # the marginalized outcome strings built below.
    margtarget = ''.join([target[i] for i in indices])

    if qubits == circ.line_labels:
        # No marginalization needed: look up the full target outcome directly.
        try:
            return dsrow.counts[target]
        except KeyError:
            # Target outcome was never observed. (Was a bare `except`; narrowed
            # so unrelated errors are no longer silently swallowed.)
            return 0

    success_counts = 0
    for (outbitstring,), counts in dsrow.counts.items():
        if ''.join([outbitstring[i] for i in indices]) == margtarget:
            success_counts += counts
    return success_counts
def hamming_distance(bs1, bs2):
    """
    The Hamming distance between two bit strings.

    Parameters
    ----------
    bs1 : str
        A bit string, e.g. `"0010"`.

    bs2 : str
        A bit string of the same length as `bs1`.

    Returns
    -------
    int
        The number of positions at which `bs1` and `bs2` differ.
        NOTE(review): if the strings have unequal lengths, `zip` silently
        truncates to the shorter one — callers presumably pass equal-length
        strings; confirm if that assumption matters.
    """
    return _np.sum([b1 != b2 for b1, b2 in zip(bs1, bs2)])
def marginalized_hamming_distance_counts(dsrow, circ, target, qubits):
    """
    Marginalize the Hamming distance counts over qubits.

    Builds a histogram, indexed by Hamming distance, of the observed outcomes'
    distances from `target`, after restricting both the outcomes and the
    target to the retained `qubits`.

    Parameters
    ----------
    dsrow : _DataSetRow
        The circuit outcome data to marginalize.

    circ : Circuit
        The circuit.

    target : str
        The ideal outcome, e.g. `"0010"`.

    qubits : tuple
        The qubit labels that are retained after the marginalization.

    Returns
    -------
    list
        Of length `len(qubits) + 1`; entry `n` is the total counts of outcomes
        at Hamming distance `n` from `target` on the retained qubits. (Entries
        are floats when data is present, ints in the no-data case.)
    """
    if dsrow.total == 0:
        # No data for this circuit: all-zero histogram.
        hamming_distance_counts = [0 for i in range(len(qubits) + 1)]
    else:
        # Positions, within the outcome bit strings, of the retained qubits.
        indices = [circ.line_labels.index(q) for q in qubits]
        # The target outcome restricted to the retained qubits; ordering matches
        # the marginalized outcome strings built below.
        margtarget = ''.join([target[i] for i in indices])
        hamming_distance_counts = _np.zeros(len(qubits) + 1, float)
        for (outbitstring,), counts in dsrow.counts.items():
            hamming_distance_counts[hamming_distance(''.join([outbitstring[i] for i in indices]), margtarget)] += counts
        hamming_distance_counts = list(hamming_distance_counts)
    return hamming_distance_counts
def rescaling_factor(lengths, quantity, offset=2):
    """
    Finds a rescaling value `alpha` that maps the Clifford RB decay constant `p` to `p_(rescaled) = p^(1/alpha)`.

    This can be used for finding, e.g., a "CRB r per CNOT" or a "CRB r per
    compiled Clifford depth". The returned value is the mean, over RB lengths,
    of the mean of `quantity[i] / (lengths[i] + offset)`.

    Parameters
    ----------
    lengths : list
        A list of the RB lengths, which each value in 'quantity' will be rescaled by.

    quantity : list
        A list, of the same length as `lengths`, that contains lists of values of the quantity
        that the rescaling factor is extracted from.

    offset : int, optional
        A constant offset to add to lengths.

    Returns
    -------
    float
    """
    assert(len(lengths) == len(quantity)), "Data format incorrect!"
    # Mean rescaled value at each RB length (renamed local: it previously
    # shadowed this function's own name), then average over lengths.
    per_length_means = [_np.mean(_np.array(qty) / (length + offset))
                        for length, qty in zip(lengths, quantity)]
    return _np.mean(_np.array(per_length_means))