-
Notifications
You must be signed in to change notification settings - Fork 55
/
optools.py
2475 lines (1989 loc) · 98.2 KB
/
optools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Utility functions operating on operation matrices
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import warnings as _warnings
import numpy as _np
import scipy.linalg as _spl
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
import functools as _functools
from pygsti.tools import basistools as _bt
from pygsti.tools import jamiolkowski as _jam
from pygsti.tools import lindbladtools as _lt
from pygsti.tools import matrixtools as _mt
from pygsti.baseobjs.basis import Basis as _Basis, ExplicitBasis as _ExplicitBasis, DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.label import Label as _Label
from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel
from pygsti.tools.legacytools import deprecate as _deprecated_fn
IMAG_TOL = 1e-7  # tolerance below which an imaginary part is treated as zero
def _flat_mut_blks(i, j, block_dims):
# like _mut(i,j,dim).flatten() but works with basis *blocks*
N = sum(block_dims)
mx = _np.zeros((N, N), 'd'); mx[i, j] = 1.0
ret = _np.zeros(sum([d**2 for d in block_dims]), 'd')
i = 0; off = 0
for d in block_dims:
ret[i:i + d**2] = mx[off:off + d, off:off + d].flatten()
i += d**2; off += d
return ret
def _hack_sqrtm(a):
sqrt, _ = _spl.sqrtm(a, disp=False) # Travis found this scipy function
# to be incorrect in certain cases (we need a workaround)
if _np.any(_np.isnan(sqrt)): # this is sometimes a good fallback when sqrtm doesn't work.
ev, U = _np.linalg.eig(a)
sqrt = _np.dot(U, _np.dot(_np.diag(_np.sqrt(ev)), _np.linalg.inv(U)))
# Scipy 1.10 fix for PR 16294 (which doubles precision of complex to complex)
if _np.iscomplexobj(a):
sqrt = sqrt.astype(a.dtype, copy=False)
return sqrt
def fidelity(a, b):
    """
    Returns the quantum state fidelity between density matrices.

    This is given by:

    `F = Tr( sqrt{ sqrt(a) * b * sqrt(a) } )^2`

    To compute process fidelity, pass this function the
    Choi matrices of the two processes, or just call
    :func:`entanglement_fidelity` with the operation matrices.

    Parameters
    ----------
    a : numpy array
        First density matrix.

    b : numpy array
        Second density matrix.

    Returns
    -------
    float
        The resulting fidelity.
    """
    evals, U = _np.linalg.eig(a)
    if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
        # special case when a is rank 1: a = ev * vec * vec^dag, so sqrt(a) is
        # proportional to a and F reduces to the quadratic form ev * vec^dag b vec
        ivec = _np.argmax(evals)
        vec = U[:, ivec:(ivec + 1)]
        F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(b, vec)).real  # vec^T * b * vec
        return float(F)

    evals, U = _np.linalg.eig(b)
    if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
        # special case when b is rank 1 (recall fidelity is symmetric in its args)
        ivec = _np.argmax(evals)
        vec = U[:, ivec:(ivec + 1)]
        F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(a, vec)).real  # vec^T * a * vec
        return float(F)

    #if _np.array_equal(a, b): return 1.0  # HACK - some cases when a and b are perfectly equal sqrtm(a) fails...
    sqrtA = _hack_sqrtm(a)  # _spl.sqrtm(a), with a fallback for NaN results
    # sanity-check the square root - scipy's sqrtm sometimes fails when a is rank deficient
    #assert(_np.linalg.norm(_np.dot(sqrtA, sqrtA) - a) < 1e-8)
    if _np.linalg.norm(_np.dot(sqrtA, sqrtA) - a) > 1e-8:
        evals = _np.linalg.eigvals(a)
        _warnings.warn(("sqrtm(a) failure when computing fidelity - beware result. "
                        "Maybe due to rank defficiency - eigenvalues of a are: %s") % evals)
    F = (_mt.trace(_hack_sqrtm(_np.dot(sqrtA, _np.dot(b, sqrtA)))).real)**2  # Tr( sqrt{ sqrt(a) * b * sqrt(a) } )^2
    return float(F)
def frobeniusdist(a, b):
    """
    Compute the Frobenius distance between two gate or density matrices.

    Computed as `sqrt( sum_ij (a_ij - b_ij)^2 )`.

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    Returns
    -------
    float
        The resulting frobenius distance.
    """
    diff = a - b
    return _mt.frobeniusnorm(diff)
def frobeniusdist_squared(a, b):
    """
    Compute the squared Frobenius distance between two gate or density matrices.

    Computed as `sum_ij (a_ij - b_ij)^2`.

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    Returns
    -------
    float
        The resulting squared frobenius distance.
    """
    diff = a - b
    return _mt.frobeniusnorm_squared(diff)
def residuals(a, b):
"""
Calculate residuals between the elements of two matrices
Parameters
----------
a : numpy array
First matrix.
b : numpy array
Second matrix.
Returns
-------
np.array
residuals
"""
return (a - b).flatten()
def tracenorm(a):
"""
Compute the trace norm of matrix `a` given by:
`Tr( sqrt{ a^dagger * a } )`
Parameters
----------
a : numpy array
The matrix to compute the trace norm of.
Returns
-------
float
"""
if _np.linalg.norm(a - _np.conjugate(a.T)) < 1e-8:
#Hermitian, so just sum eigenvalue magnitudes
return _np.sum(_np.abs(_np.linalg.eigvals(a)))
else:
#Sum of singular values (positive by construction)
return _np.sum(_np.linalg.svd(a, compute_uv=False))
def tracedist(a, b):
    """
    Compute the trace distance between two matrices.

    This is given by:

    `D = 0.5 * Tr( sqrt{ (a-b)^dagger * (a-b) } )`

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    Returns
    -------
    float
    """
    return tracenorm(a - b) / 2.0
def diamonddist(a, b, mx_basis='pp', return_x=False):
    """
    Returns the approximate diamond norm describing the difference between gate matrices.

    This is given by:

    `D = ||a - b ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1`

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    mx_basis : Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    return_x : bool, optional
        Whether to return a numpy array encoding the state (rho) at
        which the maximal trace distance occurs.

    Returns
    -------
    dm : float
        Diamond norm

    W : numpy array
        Only returned if `return_x = True`.  Encodes the state rho, such that
        `dm = trace( |(J(a)-J(b)).T * W| )`.
    """
    mx_basis = _bt.create_basis_for_matrix(a, mx_basis)

    #currently cvxpy is only needed for this function, so don't import until here
    import cvxpy as _cvxpy

    #Check if using version < 1.0 (the Variable constructor API changed at 1.0)
    old_cvxpy = bool(tuple(map(int, _cvxpy.__version__.split('.'))) < (1, 0))

    #Compute the diamond norm via the primal SDP from arXiv:1207.5726v2, Sec 3.2:
    #
    #Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
    #Subject to  [[ I otimes rho0, X],
    #             [X.dag, I otimes rho1]] >> 0
    #              rho0, rho1 are density matrices
    #              X is linear operator
    #
    #Jamiolkowski representation of the process:  J(phi) = sum_ij Phi(Eij) otimes Eij
    #Inner product:  < a, b > = Tr(a.dag b)

    #Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by
    # density mx dimension (`smallDim`) below
    JAstd = _jam.fast_jamiolkowski_iso_std(a, mx_basis)
    JBstd = _jam.fast_jamiolkowski_iso_std(b, mx_basis)

    #Do this *after* the fast_jamiolkowski_iso calls above because these will convert
    # a & b to a "single-block" basis representation when mx_basis has multiple blocks.
    dim = JAstd.shape[0]
    smallDim = int(_np.sqrt(dim))
    JAstd *= smallDim  # see above comment
    JBstd *= smallDim  # see above comment
    assert(dim == JAstd.shape[1] == JBstd.shape[0] == JBstd.shape[1])

    #The object whose diamond norm we bound is the difference of Choi matrices
    jamiolkowski_matrix = JBstd - JAstd

    # Here we define a bunch of auxiliary matrices because CVXPY doesn't use complex numbers:
    # a complex matrix M = A + iB is embedded as the real matrix [[A, -B], [B, A]],
    # which is PSD exactly when M is PSD.
    K = jamiolkowski_matrix.real  # J.real
    L = jamiolkowski_matrix.imag  # J.imag

    if old_cvxpy:
        Y = _cvxpy.Variable(dim, dim)  # X.real
        Z = _cvxpy.Variable(dim, dim)  # X.imag
        sig0 = _cvxpy.Variable(smallDim, smallDim)  # rho0.real
        sig1 = _cvxpy.Variable(smallDim, smallDim)  # rho1.real
        tau0 = _cvxpy.Variable(smallDim, smallDim)  # rho0.imag
        tau1 = _cvxpy.Variable(smallDim, smallDim)  # rho1.imag
    else:
        Y = _cvxpy.Variable(shape=(dim, dim))  # X.real
        Z = _cvxpy.Variable(shape=(dim, dim))  # X.imag
        sig0 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho0.real
        sig1 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho1.real
        tau0 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho0.imag
        tau1 = _cvxpy.Variable(shape=(smallDim, smallDim))  # rho1.imag

    ident = _np.identity(smallDim, 'd')

    objective = _cvxpy.Maximize(_cvxpy.trace(K.T @ Y + L.T @ Z))
    constraints = [_cvxpy.bmat([
        [_cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
        [Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
        [_cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
        [-Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]]) >> 0,
        _cvxpy.bmat([[sig0, -tau0],
                     [tau0, sig0]]) >> 0,
        _cvxpy.bmat([[sig1, -tau1],
                     [tau1, sig1]]) >> 0,
        sig0 == sig0.T,  # Hermitian rho: real part symmetric...
        sig1 == sig1.T,
        tau0 == -tau0.T,  # ...and imaginary part antisymmetric
        tau1 == -tau1.T,
        _cvxpy.trace(sig0) == 1.,  # unit trace (rho0, rho1 are density matrices)
        _cvxpy.trace(sig1) == 1.]

    prob = _cvxpy.Problem(objective, constraints)
    try:
        prob.solve(solver="CVXOPT")
        # prob.solve(solver="ECOS")
        # prob.solve(solver="SCS")  # This always fails
    except _cvxpy.error.SolverError as e:
        _warnings.warn("CVXPY failed: %s - diamonddist returning -2!" % str(e))
        return (-2, _np.zeros((dim, dim))) if return_x else -2
    except:
        _warnings.warn("CVXOPT failed (uknown err) - diamonddist returning -2!")
        return (-2, _np.zeros((dim, dim))) if return_x else -2

    if return_x:
        X = Y.value + 1j * Z.value  # encodes state at which maximum trace-distance occurs
        return prob.value, X
    else:
        return prob.value
def jtracedist(a, b, mx_basis='pp'):
    """
    Compute the Jamiolkowski trace distance between operation matrices.

    This is `Tr(|J(a)-J(b)|) / 2`, i.e.:

    `D = 0.5 * Tr( sqrt{ (J(a)-J(b))^2 } )`

    where J(.) is the Jamiolkowski isomorphism map that maps a operation matrix
    to it's corresponding Choi Matrix.

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The source and destination basis, respectively.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    Returns
    -------
    float
    """
    choi_a = _jam.fast_jamiolkowski_iso_std(a, mx_basis)
    choi_b = _jam.fast_jamiolkowski_iso_std(b, mx_basis)
    return tracedist(choi_a, choi_b)
def entanglement_fidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
"""
Returns the "entanglement" process fidelity between gate matrices.
This is given by:
`F = Tr( sqrt{ sqrt(J(a)) * J(b) * sqrt(J(a)) } )^2`
where J(.) is the Jamiolkowski isomorphism map that maps a operation matrix
to it's corresponding Choi Matrix.
When the both of the input matrices a and b are TP, and
the target matrix b is unitary then we can use a more efficient
formula:
`F= Tr(a @ b.conjugate().T)/d^2
Parameters
----------
a : numpy array
First matrix.
b : numpy array
Second matrix.
mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis of the matrices. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
is_tp : bool, optional (default None)
Flag indicating both matrices are TP. If None (the default),
an explicit check is performed. If True/False, the check is
skipped and the provided value is used (faster, but should only
be used when the user is certain this is true apriori).
is_unitary : bool, optional (default None)
Flag indicating that the second matrix, b, is
unitary. If None (the default) an explicit check is performed.
If True/False, the check is skipped and the provided value is used
(faster, but should only be used when the user is certain
this is true apriori).
Returns
-------
float
"""
d2 = a.shape[0]
#if the tp flag isn't set we'll calculate whether it is true here
if is_tp is None:
def is_tp_fn(x): return _np.isclose(x[0, 0], 1.0) and all(
[_np.isclose(x[0, i], 0) for i in range(1,d2)])
is_tp= (is_tp_fn(a) and is_tp_fn(b))
#if the unitary flag isn't set we'll calculate whether it is true here
if is_unitary is None:
is_unitary= _np.allclose(_np.identity(d2, 'd'), _np.dot(b, b.conjugate().T))
if is_tp and is_unitary: # then assume TP-like gates & use simpler formula
#old version, slower than einsum
#TrLambda = _np.trace(_np.dot(a, b.conjugate().T)) # same as using _np.linalg.inv(b)
#Use einsum black magic to only calculate the diagonal elements
#if the basis is either pp or gm we know the elements are real-valued, so we
#don't need to take the conjugate
if mx_basis=='pp' or mx_basis=='gm':
TrLambda = _np.einsum('ij,ji->',a, b.T)
else:
TrLambda = _np.einsum('ij,ji->',a, b.conjugate().T)
return TrLambda / d2
JA = _jam.jamiolkowski_iso(a, mx_basis, mx_basis)
JB = _jam.jamiolkowski_iso(b, mx_basis, mx_basis)
return fidelity(JA, JB)
def average_gate_fidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
    """
    Computes the average gate fidelity (AGF) between two gates.

    Average gate fidelity (`F_g`) is related to entanglement fidelity
    (`F_p`), via:

    `F_g = (d * F_p + 1)/(1 + d)`,

    where d is the Hilbert space dimension.  This formula, and the
    definition of AGF, can be found in Phys. Lett. A 303 249-252 (2002).

    Parameters
    ----------
    a : array or gate
        The gate to compute the AGI to b of.  E.g., an imperfect
        implementation of b.

    b : array or gate
        The gate to compute the AGI to a of.  E.g., the target gate
        corresponding to a.

    mx_basis : {"std","gm","pp"} or Basis object, optional
        The basis of the matrices.

    is_tp : bool, optional (default None)
        Flag indicating both matrices are TP.  If None (the default),
        an explicit check is performed.  If True/False, the check is
        skipped and the provided value is used (faster, but should only
        be used when the user is certain this is true apriori).

    is_unitary : bool, optional (default None)
        Flag indicating that the second matrix, b, is
        unitary.  If None (the default) an explicit check is performed.
        If True/False, the check is skipped and the provided value is used
        (faster, but should only be used when the user is certain
        this is true apriori).

    Returns
    -------
    AGI : float
        The AGI of a to b.
    """
    # Superoperators act on a d x d density matrix, so the d^2-dim
    # superoperator dimension gives d = sqrt(shape[0])
    dim = int(round(_np.sqrt(a.shape[0])))
    proc_fid = entanglement_fidelity(a, b, mx_basis, is_tp, is_unitary)
    return float((dim * proc_fid + 1) / (dim + 1))
def average_gate_infidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
    """
    Computes the average gate infidelity (`AGI`) between two gates.

    Average gate infidelity is related to entanglement infidelity
    (`EI`) via:

    `AGI = (d * (1-EI) + 1)/(1 + d)`,

    where d is the Hilbert space dimension.  This formula, and the
    definition of AGI, can be found in Phys. Lett. A 303 249-252 (2002).

    Parameters
    ----------
    a : array or gate
        The gate to compute the AGI to b of.  E.g., an imperfect
        implementation of b.

    b : array or gate
        The gate to compute the AGI to a of.  E.g., the target gate
        corresponding to a.

    mx_basis : {"std","gm","pp"} or Basis object, optional
        The basis of the matrices.

    is_tp : bool, optional (default None)
        Flag indicating both matrices are TP.  If None (the default),
        an explicit check is performed.  If True/False, the check is
        skipped and the provided value is used (faster, but should only
        be used when the user is certain this is true apriori).

    is_unitary : bool, optional (default None)
        Flag indicating that the second matrix, b, is
        unitary.  If None (the default) an explicit check is performed.
        If True/False, the check is skipped and the provided value is used
        (faster, but should only be used when the user is certain
        this is true apriori).

    Returns
    -------
    float
    """
    agf = average_gate_fidelity(a, b, mx_basis, is_tp, is_unitary)
    return 1 - agf
def entanglement_infidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
    """
    Returns the entanglement infidelity (EI) between gate matrices.

    This is given by:

    `EI = 1 - Tr( sqrt{ sqrt(J(a)) * J(b) * sqrt(J(a)) } )^2`

    where J(.) is the Jamiolkowski isomorphism map that maps a operation matrix
    to it's corresponding Choi Matrix.

    Parameters
    ----------
    a : numpy array
        First matrix.

    b : numpy array
        Second matrix.

    mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
        The basis of the matrices.  Allowed values are Matrix-unit (std),
        Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
        (or a custom basis object).

    is_tp : bool, optional (default None)
        Flag indicating both matrices are TP.  If None (the default),
        an explicit check is performed.  If True/False, the check is
        skipped and the provided value is used (faster, but should only
        be used when the user is certain this is true apriori).

    is_unitary : bool, optional (default None)
        Flag indicating that the second matrix, b, is
        unitary.  If None (the default) an explicit check is performed.
        If True/False, the check is skipped and the provided value is used
        (faster, but should only be used when the user is certain
        this is true apriori).

    Returns
    -------
    EI : float
        The EI of a to b.
    """
    ent_fid = entanglement_fidelity(a, b, mx_basis, is_tp, is_unitary)
    return 1 - float(ent_fid)
def gateset_infidelity(model, target_model, itype='EI',
                       weights=None, mx_basis=None, is_tp=None, is_unitary=None):
    """
    Computes the average-over-gates of the infidelity between gates in `model` and the gates in `target_model`.

    If `itype` is 'EI' then the "infidelity" is the entanglement infidelity; if
    `itype` is 'AGI' then the "infidelity" is the average gate infidelity (AGI
    and EI are related by a dimension dependent constant).

    This is the quantity that RB error rates are sometimes claimed to be
    related to directly related.

    Parameters
    ----------
    model : Model
        The model to calculate the average infidelity, to `target_model`, of.

    target_model : Model
        The model to calculate the average infidelity, to `model`, of.

    itype : str, optional
        The infidelity type.  Either 'EI', corresponding to entanglement
        infidelity, or 'AGI', corresponding to average gate infidelity.

    weights : dict, optional
        If not None, a dictionary of floats, whereby the keys are the gates
        in `model` and the values are, possibly unnormalized, probabilities.
        These probabilities corresponding to the weighting in the average,
        so if the model contains gates A and B and weights[A] = 2 and
        weights[B] = 1 then the output is Inf(A)*2/3  + Inf(B)/3 where
        Inf(X) is the infidelity (to the corresponding element in the other
        model) of X.  If None, a uniform-average is taken, equivalent to
        setting all the weights to 1.

    mx_basis : {"std","gm","pp"} or Basis object, optional
        The basis of the models.  If None, the basis is obtained from
        the model.

    is_tp : bool, optional (default None)
        Flag indicating both matrices are TP.  If None (the default),
        an explicit check is performed.  If True/False, the check is
        skipped and the provided value is used (faster, but should only
        be used when the user is certain this is true apriori).

    is_unitary : bool, optional (default None)
        Flag indicating that the second matrix, b, is
        unitary.  If None (the default) an explicit check is performed.
        If True/False, the check is skipped and the provided value is used
        (faster, but should only be used when the user is certain
        this is true apriori).

    Returns
    -------
    float
        The weighted average-over-gates infidelity between the two models.
    """
    assert(itype == 'AGI' or itype == 'EI'), \
        "The infidelity type must be `AGI` (average gate infidelity) or `EI` (entanglement infidelity)"

    if mx_basis is None: mx_basis = model.basis

    sum_of_weights = 0
    I_list = []
    for gate in list(target_model.operations.keys()):
        # BUGFIX: previously `is_unitary` was accepted but never forwarded to
        # the per-gate infidelity calls, silently ignoring the caller's flag.
        if itype == 'AGI':
            I = average_gate_infidelity(model.operations[gate], target_model.operations[gate],
                                        mx_basis, is_tp, is_unitary)
        else:  # itype == 'EI' (guaranteed by the assert above)
            I = entanglement_infidelity(model.operations[gate], target_model.operations[gate],
                                        mx_basis, is_tp, is_unitary)

        w = 1 if weights is None else weights[gate]
        I_list.append(w * I)
        sum_of_weights += w

    assert(sum_of_weights > 0), "The sum of the weights should be positive!"
    return _np.sum(I_list) / sum_of_weights
def unitarity(a, mx_basis="gm"):
    r"""
    Returns the "unitarity" of a channel.

    Unitarity is defined as in Wallman et al, "Estimating the Coherence of
    noise" NJP 17 113020 (2015).  The unitarity is given by (Prop 1 in Wallman
    et al):

    `u(a) = Tr( A_u^{\dagger} A_u ) / (d^2  - 1)`,

    where A_u is the unital submatrix of a, and d is the dimension of
    the Hilbert space.  When a is written in any basis for which the
    first element is the  normalized identity (e.g., the pp or gm
    bases), The unital submatrix of a is the matrix obtained when the
    top row and left hand column is removed from a.

    Parameters
    ----------
    a : array or gate
        The gate for which the unitarity is to be computed.

    mx_basis : {"std","gm","pp"} or a Basis object, optional
        The basis of the matrix.

    Returns
    -------
    float
    """
    d = int(round(_np.sqrt(a.shape[0])))
    basis_mxs = _bt.basis_matrices(mx_basis, a.shape[0])

    if _np.allclose(basis_mxs[0], _np.identity(d, 'd')):
        B = a
    else:
        # everything should be able to be put in the "gm" basis
        B = _bt.change_basis(a, mx_basis, "gm")

    # Drop the top row and left column to get the unital submatrix
    unital_block = B[1:d**2, 1:d**2]

    # u = Tr(A_u^dag A_u) / (d^2 - 1); einsum forms only the product's diagonal
    return _np.einsum('ij,ji->', unital_block.conjugate().T, unital_block) / (d**2 - 1)
def fidelity_upper_bound(operation_mx):
    """
    Get an upper bound on the fidelity of the given operation matrix with any unitary operation matrix.

    The closeness of the result to one tells
    how "unitary" the action of operation_mx is.

    Parameters
    ----------
    operation_mx : numpy array
        The operation matrix to act on.

    Returns
    -------
    float
        The resulting upper bound on fidelity(operation_mx, anyUnitaryGateMx)
    """
    choi = _jam.jamiolkowski_iso(operation_mx, choi_mx_basis="std")
    choi_evals, choi_evecs = _np.linalg.eig(choi)

    # "direct" bound from eigenvalues alone: largest (clipped-to-nonnegative)
    # Choi eigenvalue, via sqrt then square so negative parts are discarded
    maxF_direct = max([_np.sqrt(max(ev.real, 0.0)) for ev in choi_evals]) ** 2

    iMax = _np.argmax([ev.real for ev in choi_evals])  # index of maximum eigenval
    closestVec = choi_evecs[:, iMax:(iMax + 1)]

    # rank-1 Choi matrix built from the dominant eigenvector: the Choi matrix
    # of the closest unitary action
    closestJmx = _np.kron(closestVec, _np.transpose(_np.conjugate(closestVec)))  # closest rank-1 Jmx
    closestJmx /= _mt.trace(closestJmx)  # normalize so trace of Jmx == 1.0

    maxF = fidelity(choi, closestJmx)

    if not _np.isnan(maxF):
        # cross-check: the two ways of computing the bound should agree
        assert(abs(maxF - maxF_direct) < 1e-6)
    else:
        maxF = maxF_direct  # case when maxF is nan, due to scipy sqrtm function being buggy - just use direct F

    closestOpMx = _jam.jamiolkowski_iso_inv(closestJmx, choi_mx_basis="std")
    return maxF, closestOpMx
def compute_povm_map(model, povmlbl):
    """
    Constructs a gate-like quantity for the POVM within `model`.

    This is done by embedding the `k`-outcome classical output space of the POVM
    in the Hilbert-Schmidt space of `k` by `k` density matrices by placing the
    classical probability distribution along the diagonal of the density matrix.
    Currently, this is only implemented for the case when `k` equals `d`, the
    dimension of the POVM's Hilbert space.

    Parameters
    ----------
    model : Model
        The model supplying the POVM effect vectors and the basis those
        vectors are in.

    povmlbl : str
        The POVM label

    Returns
    -------
    numpy.ndarray
        The matrix of the "POVM map" in the `model.basis` basis.
    """
    # each effect vector becomes one row of the "POVM map" (after transpose below)
    povmVectors = [v.to_dense()[:, None] for v in model.povms[povmlbl].values()]
    if isinstance(model.basis, _DirectSumBasis):  # HACK - need to get this to work with general bases
        blkDims = [int(_np.sqrt(comp.dim)) for comp in model.basis.component_bases]
    else:
        blkDims = [int(round(_np.sqrt(model.dim)))]  # [d] where density matrix is dxd

    nV = len(povmVectors)
    #assert(d**2 == model.dim), "Model dimension (%d) is not a perfect square!" % model.dim
    #assert( nV**2 == d ), "Can only compute POVM metrics when num of effects == H space dimension"
    #  I don't think above assert is needed - should work in general (Robin?)
    povm_mx = _np.concatenate(povmVectors, axis=1).T  # "povm map" ( B(H) -> S_k ) (shape= nV,model.dim)

    # Embed the k-dim classical space S_k into std-basis density matrices by
    # mapping outcome i to the (i,i) diagonal element (via _flat_mut_blks)
    Sk_embedding_in_std = _np.zeros((model.dim, nV))
    for i in range(nV):
        Sk_embedding_in_std[:, i] = _flat_mut_blks(i, i, blkDims)

    std_basis = _Basis.cast('std', model.dim)  # make sure std basis is just straight-up d-dimension
    std_to_basis = model.basis.reverse_transform_matrix(std_basis)
    # OLD: _bt.create_transform_matrix("std", model.basis, blkDims)
    assert(std_to_basis.shape == (model.dim, model.dim))
    return _np.dot(std_to_basis, _np.dot(Sk_embedding_in_std, povm_mx))
def povm_fidelity(model, target_model, povmlbl):
    """
    Compute the process (entanglement) fidelity between two models' POVM maps.

    Parameters
    ----------
    model : Model
        The model the POVM belongs to.

    target_model : Model
        The target model (which also has `povmlbl`).

    povmlbl : Label
        Label of the POVM to get the fidelity of.

    Returns
    -------
    float
    """
    povm_map = compute_povm_map(model, povmlbl)
    target_map = compute_povm_map(target_model, povmlbl)
    return entanglement_fidelity(povm_map, target_map, target_model.basis)
def povm_jtracedist(model, target_model, povmlbl):
    """
    Compute the Jamiolkowski trace distance between two models' POVM maps using :func:`jtracedist`.

    Parameters
    ----------
    model : Model
        The model the POVM belongs to.

    target_model : Model
        The target model (which also has `povmlbl`).

    povmlbl : Label
        Label of the POVM to get the trace distance of.

    Returns
    -------
    float
    """
    povm_map = compute_povm_map(model, povmlbl)
    target_map = compute_povm_map(target_model, povmlbl)
    return jtracedist(povm_map, target_map, target_model.basis)
def povm_diamonddist(model, target_model, povmlbl):
    """
    Compute the diamond distance between two models' POVM maps using :func:`diamonddist`.

    Parameters
    ----------
    model : Model
        The model the POVM belongs to.

    target_model : Model
        The target model (which also has `povmlbl`).

    povmlbl : Label
        Label of the POVM to get the diamond distance of.

    Returns
    -------
    float
    """
    povm_map = compute_povm_map(model, povmlbl)
    target_map = compute_povm_map(target_model, povmlbl)
    return diamonddist(povm_map, target_map, target_model.basis)
#decompose operation matrix into axis of rotation, etc
def decompose_gate_matrix(operation_mx):
"""
Decompse a gate matrix into fixed points, axes of rotation, angles of rotation, and decay rates.
This funtion computes how the action of a operation matrix can be is
decomposed into fixed points, axes of rotation, angles of rotation, and
decays. Also determines whether a gate appears to be valid and/or unitary.
Parameters
----------
operation_mx : numpy array
The operation matrix to act on.
Returns
-------
dict
A dictionary describing the decomposed action. Keys are:
'isValid' : bool
whether decomposition succeeded
'isUnitary' : bool
whether operation_mx describes unitary action
'fixed point' : numpy array
the fixed point of the action
'axis of rotation' : numpy array or nan
the axis of rotation
'decay of diagonal rotation terms' : float
decay of diagonal terms
'rotating axis 1' : numpy array or nan
1st axis orthogonal to axis of rotation
'rotating axis 2' : numpy array or nan
2nd axis orthogonal to axis of rotation
'decay of off diagonal rotation terms' : float
decay of off-diagonal terms
'pi rotations' : float
angle of rotation in units of pi radians
"""
op_evals, op_evecs = _np.linalg.eig(_np.asarray(operation_mx))
# fp_eigenvec = None
# aor_eval = None; aor_eigenvec = None
# ra_eval = None; ra1_eigenvec = None; ra2_eigenvec = None
TOL = 1e-4 # 1e-7
unit_eval_indices = [i for (i, ev) in enumerate(op_evals) if abs(ev - 1.0) < TOL]