"""Core tensor network tools.
"""
import os
import re
import copy
import uuid
import math
import string
import weakref
import operator
import functools
import itertools
import contextlib
import collections
from numbers import Integral
from cytoolz import (unique, concat, frequencies,
partition_all, merge_with, valmap)
import numpy as np
import opt_einsum as oe
import scipy.sparse.linalg as spla
from autoray import do, conj, reshape, transpose
from ..core import qarray, prod, realify_scalar, vdot, common_type
from ..utils import check_opt, functions_equal
from ..gen.rand import randn, seed_rand
from . import decomp
from .array_ops import iscomplex, norm_fro, unitize, ndim, asarray
_DEFAULT_CONTRACTION_STRATEGY = 'greedy'
def get_contract_strategy():
"""Get the default contraction strategy - the option supplied as
``optimize`` to ``opt_einsum``.
"""
return _DEFAULT_CONTRACTION_STRATEGY
def set_contract_strategy(strategy):
"""Get the default contraction strategy - the option supplied as
``optimize`` to ``opt_einsum``.
"""
global _DEFAULT_CONTRACTION_STRATEGY
_DEFAULT_CONTRACTION_STRATEGY = strategy
@contextlib.contextmanager
def contract_strategy(strategy):
"""A context manager to temporarily set the default contraction strategy
supplied as ``optimize`` to ``opt_einsum``.
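    Examples
    --------
    An illustrative sketch - within the block all contractions default to
    the given strategy:
    >>> with contract_strategy('optimal'):
    ...     print(get_contract_strategy())
    optimal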
"""
orig_strategy = get_contract_strategy()
try:
yield set_contract_strategy(strategy)
finally:
set_contract_strategy(orig_strategy)
def _get_contract_expr(eq, *shapes, **kwargs):
# choose how large intermediate arrays can be
kwargs.setdefault('optimize', _DEFAULT_CONTRACTION_STRATEGY)
return oe.contract_expression(eq, *shapes, **kwargs)
_get_contract_expr_cached = functools.lru_cache(4096)(_get_contract_expr)
def get_contract_expr(eq, *shapes, cache=True, **kwargs):
"""Get an callable expression that will evaluate ``eq`` based on
``shapes``. Cache the result if no constant tensors are involved.
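    Examples
    --------
    An illustrative sketch - the cached expression can be reused on any
    arrays of the right shapes:
    >>> expr = get_contract_expr('ab,bc->ac', (2, 3), (3, 4))
    >>> expr(np.random.rand(2, 3), np.random.rand(3, 4)).shape
    (2, 4)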
"""
# can only cache if the expression does not involve constant tensors
if kwargs.get('constants', None) or not cache:
return _get_contract_expr(eq, *shapes, **kwargs)
# make sure path given as list is hashable and thus cachable
if isinstance(kwargs.get('optimize', None), list):
kwargs['optimize'] = tuple(kwargs['optimize'])
try:
return _get_contract_expr_cached(eq, *shapes, **kwargs)
except TypeError:
shapes = (tuple(map(int, s)) for s in shapes)
return _get_contract_expr_cached(eq, *shapes, **kwargs)
try:
from opt_einsum.contract import infer_backend
del infer_backend
_CONTRACT_BACKEND = 'auto'
_TENSOR_LINOP_BACKEND = 'auto'
except ImportError:
_CONTRACT_BACKEND = 'numpy'
_TENSOR_LINOP_BACKEND = 'numpy'
def get_contract_backend():
"""Get the default backend used for tensor contractions, via 'opt_einsum'.
See Also
--------
set_contract_backend, get_tensor_linop_backend, set_tensor_linop_backend,
tensor_contract
"""
return _CONTRACT_BACKEND
def set_contract_backend(backend):
"""Set the default backend used for tensor contractions, via 'opt_einsum'.
See Also
--------
get_contract_backend, set_tensor_linop_backend, get_tensor_linop_backend,
tensor_contract
"""
global _CONTRACT_BACKEND
_CONTRACT_BACKEND = backend
@contextlib.contextmanager
def contract_backend(backend):
"""A context manager to temporarily set the default backend used for tensor
contractions, via 'opt_einsum'.
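    Examples
    --------
    A minimal sketch - any valid ``opt_einsum`` backend with the relevant
    library installed can be supplied:
    >>> with contract_backend('numpy'):
    ...     print(get_contract_backend())
    numpy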
"""
orig_backend = get_contract_backend()
try:
yield set_contract_backend(backend)
finally:
set_contract_backend(orig_backend)
def get_tensor_linop_backend():
"""Get the default backend used for tensor network linear operators, via
'opt_einsum'. This is different from the default contraction backend as
    the contractions are likely to be called repeatedly.
See Also
--------
set_tensor_linop_backend, set_contract_backend, get_contract_backend,
TNLinearOperator
"""
return _TENSOR_LINOP_BACKEND
def set_tensor_linop_backend(backend):
"""Set the default backend used for tensor network linear operators, via
'opt_einsum'. This is different from the default contraction backend as
    the contractions are likely to be called repeatedly.
See Also
--------
get_tensor_linop_backend, set_contract_backend, get_contract_backend,
TNLinearOperator
"""
global _TENSOR_LINOP_BACKEND
_TENSOR_LINOP_BACKEND = backend
@contextlib.contextmanager
def tensor_linop_backend(backend):
"""A context manager to temporarily set the default backend used for tensor
network linear operators, via 'opt_einsum'.
"""
orig_backend = get_tensor_linop_backend()
try:
yield set_tensor_linop_backend(backend)
finally:
set_tensor_linop_backend(orig_backend)
# --------------------------------------------------------------------------- #
# Tensor Funcs #
# --------------------------------------------------------------------------- #
def set_union(sets):
"""Non variadic version of set.union.
"""
return set.union(*sets)
class OrderedCounter(collections.Counter, collections.OrderedDict):
pass
def _gen_output_inds(all_inds):
"""Generate the output, i.e. unique, indices from the set ``inds``. Raise
if any index found more than twice.
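    Examples
    --------
    A minimal sketch - indices appearing exactly once survive to the output:
    >>> list(_gen_output_inds(['a', 'b', 'b', 'c']))
    ['a', 'c']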
"""
cnts = OrderedCounter(all_inds)
for ind, freq in cnts.items():
if freq > 2:
raise ValueError("The index {} appears more "
"than twice!".format(ind))
elif freq == 1:
yield ind
def _maybe_map_indices_to_alphabet(a_ix, i_ix, o_ix):
"""``einsum`` need characters a-z,A-Z or equivalent numbers.
Do this early, and allow *any* index labels.
Parameters
----------
a_ix : set
All of the input indices.
i_ix : sequence of sequence
The input indices per tensor.
o_ix : list of int
The output indices.
Returns
-------
eq : str
The string to feed to einsum/contract.
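    Examples
    --------
    An illustrative sketch with arbitrary index labels:
    >>> _maybe_map_indices_to_alphabet(
    ...     ('bond1', 'bond2', 'bond3'),
    ...     [('bond1', 'bond2'), ('bond2', 'bond3')],
    ...     ['bond1', 'bond3'])
    'ab,bc->ac'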
"""
amap = {ix: oe.get_symbol(i) for i, ix in enumerate(a_ix)}
in_str = ("".join(amap[i] for i in ix) for ix in i_ix)
out_str = "".join(amap[o] for o in o_ix)
return ",".join(in_str) + "->" + out_str
_VALID_CONTRACT_GET = {None, 'expression', 'path-info', 'symbol-map'}
def tensor_contract(*tensors, output_inds=None, get=None,
backend=None, **contract_opts):
"""Efficiently contract multiple tensors, combining their tags.
Parameters
----------
tensors : sequence of Tensor
The tensors to contract.
output_inds : sequence of str
If given, the desired order of output indices, else defaults to the
order they occur in the input indices.
    get : {None, 'expression', 'path-info', 'symbol-map'}, optional
What to return. If:
* ``None`` (the default) - return the resulting scalar or Tensor.
* ``'expression'`` - return the ``opt_einsum`` expression that
performs the contraction and operates on the raw arrays.
* ``'symbol-map'`` - return the dict mapping ``opt_einsum`` symbols
to tensor indices.
* ``'path-info'`` - return the full ``opt_einsum`` path object with
detailed information such as flop cost. The symbol-map is also
added to the ``quimb_symbol_map`` attribute.
backend : {'numpy', 'cupy', 'tensorflow', 'theano', 'dask', ...}, optional
Which backend to use to perform the contraction. Must be a valid
``opt_einsum`` backend with the relevant library installed.
contract_opts
Passed to ``opt_einsum.contract_expression`` or
``opt_einsum.contract_path``.
Returns
-------
scalar or Tensor
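    Examples
    --------
    A small sketch, contracting two tensors over their shared index 'b':
    >>> A = Tensor(randn((2, 3)), inds=['a', 'b'])
    >>> B = Tensor(randn((3, 4)), inds=['b', 'c'])
    >>> tensor_contract(A, B)
    Tensor(shape=(2, 4), inds=('a', 'c'), tags=set())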
"""
check_opt('get', get, _VALID_CONTRACT_GET)
if backend is None:
backend = _CONTRACT_BACKEND
i_ix = tuple(t.inds for t in tensors) # input indices per tensor
total_ix = tuple(concat(i_ix)) # list of all input indices
all_ix = tuple(unique(total_ix))
if output_inds is None:
# sort output indices by input order for efficiency and consistency
o_ix = tuple(_gen_output_inds(total_ix))
else:
o_ix = output_inds
    # possibly map indices into the range needed by opt_einsum
eq = _maybe_map_indices_to_alphabet(all_ix, i_ix, o_ix)
if get == 'symbol-map':
return {oe.get_symbol(i): ix for i, ix in enumerate(all_ix)}
if get == 'path-info':
ops = (t.data for t in tensors)
path_info = oe.contract_path(eq, *ops, **contract_opts)[1]
path_info.quimb_symbol_map = {oe.get_symbol(i): ix
for i, ix in enumerate(all_ix)}
return path_info
if get == 'expression':
# account for possible constant tensors
cnst = contract_opts.get('constants', ())
ops = (t.data if i in cnst else t.shape for i, t in enumerate(tensors))
expression = get_contract_expr(eq, *ops, **contract_opts)
return expression
# perform the contraction
shapes = (t.shape for t in tensors)
expression = get_contract_expr(eq, *shapes, **contract_opts)
o_array = expression(*(t.data for t in tensors), backend=backend)
if not o_ix:
if isinstance(o_array, np.ndarray):
o_array = realify_scalar(o_array.item(0))
return o_array
    # union of all tags
o_tags = set_union(t.tags for t in tensors)
return Tensor(data=o_array, inds=o_ix, tags=o_tags)
# generate a random base to avoid collisions on different processes ...
r_bs_str = str(uuid.uuid4())[:6]
# but then make the list orderable to help contraction caching
RAND_UUIDS = map("".join, itertools.product(string.hexdigits, repeat=7))
def rand_uuid(base=""):
"""Return a guaranteed unique, shortish identifier, optional appended
to ``base``.
Examples
--------
>>> rand_uuid()
'_2e1dae1b'
>>> rand_uuid('virt-bond')
'virt-bond_bf342e68'
"""
return base + "_" + r_bs_str + next(RAND_UUIDS)
_VALID_SPLIT_GET = {None, 'arrays', 'tensors', 'values'}
def tensor_split(T, left_inds, method='svd', max_bond=None, absorb='both',
cutoff=1e-10, cutoff_mode='sum2', get=None, bond_ind=None,
ltags=None, rtags=None, right_inds=None):
"""Decompose this tensor into two tensors.
Parameters
----------
T : Tensor
The tensor to split.
left_inds : str or sequence of str
        The index or sequence of inds, which ``T`` should already have, to
split to the 'left'.
method : str, optional
How to split the tensor, only some methods allow bond truncation:
- 'svd': full SVD, allows truncation.
- 'eig': full SVD via eigendecomp, allows truncation.
- 'svds': iterative svd, allows truncation.
- 'isvd': iterative svd using interpolative methods, allows
truncation.
- 'rsvd' : randomized iterative svd with truncation.
- 'qr': full QR decomposition.
        - 'lq': full LQ decomposition.
        - 'eigh': full eigen-decomposition, tensor must be hermitian.
        - 'eigsh': iterative eigen-decomposition, tensor must be hermitian.
- 'cholesky': full cholesky decomposition, tensor must be positive.
    max_bond : None or int, optional
        If integer, the maximum number of singular values to keep, regardless
        of ``cutoff``.
    absorb : {'both', 'left', 'right'}, optional
Whether to absorb the singular values into both, the left or right
unitary matrix respectively.
cutoff : float, optional
The threshold below which to discard singular values, only applies to
SVD and eigendecomposition based methods (not QR, LQ, or cholesky).
cutoff_mode : {'sum2', 'rel', 'abs', 'rsum2'}
Method with which to apply the cutoff threshold:
- 'rel': values less than ``cutoff * s[0]`` discarded.
- 'abs': values less than ``cutoff`` discarded.
- 'sum2': sum squared of values discarded must be ``< cutoff``.
- 'rsum2': sum squared of values discarded must be less than
``cutoff`` times the total sum squared values.
get : {None, 'arrays', 'tensors', 'values'}
If given, what to return instead of a TN describing the split. The
default, ``None``, returns a TensorNetwork of the two tensors.
bond_ind : str, optional
Explicitly name the new bond, else a random one will be generated.
ltags : sequence of str, optional
Add these new tags to the left tensor.
rtags : sequence of str, optional
Add these new tags to the right tensor.
right_inds : sequence of str, optional
Explicitly give the right indices, otherwise they will be worked out.
This is a minor performance feature.
Returns
-------
TensorNetwork or (Tensor, Tensor) or (array, array) or 1D-array
Respectively if get={None, 'tensors', 'arrays', 'values'}.
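    Examples
    --------
    An illustrative sketch, splitting a 3-index tensor via the default SVD:
    >>> T = rand_tensor((2, 3, 4), inds=('a', 'b', 'c'))
    >>> Tl, Tr = tensor_split(T, left_inds=['a'], get='tensors')
    >>> Tl.inds[-1] == Tr.inds[0]  # the two halves share the new bond
    True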
"""
check_opt('get', get, _VALID_SPLIT_GET)
if isinstance(left_inds, str):
left_inds = (left_inds,)
else:
left_inds = tuple(left_inds)
if right_inds is None:
right_inds = tuple(x for x in T.inds if x not in left_inds)
TT = T.transpose(*left_inds, *right_inds)
left_dims = TT.shape[:len(left_inds)]
right_dims = TT.shape[len(left_inds):]
array = reshape(TT.data, (prod(left_dims), prod(right_dims)))
if get == 'values':
return {'svd': decomp._svdvals,
'eig': decomp._svdvals_eig}[method](array)
opts = {}
if method not in ('qr', 'lq'):
# Convert defaults and settings to numeric type for numba funcs
opts['cutoff'] = {None: -1.0}.get(cutoff, cutoff)
opts['absorb'] = {'left': -1, 'both': 0, 'right': 1}[absorb]
opts['max_bond'] = {None: -1}.get(max_bond, max_bond)
opts['cutoff_mode'] = {'abs': 1, 'rel': 2,
'sum2': 3, 'rsum2': 4}[cutoff_mode]
left, right = {
'svd': decomp._svd,
'eig': decomp._eig,
'qr': decomp._qr,
'lq': decomp._lq,
'eigh': decomp._eigh,
'cholesky': decomp._cholesky,
'isvd': decomp._isvd,
'svds': decomp._svds,
'rsvd': decomp._rsvd,
'eigsh': decomp._eigsh,
}[method](array, **opts)
left = reshape(left, (*left_dims, -1))
right = reshape(right, (-1, *right_dims))
if get == 'arrays':
return left, right
bond_ind = rand_uuid() if bond_ind is None else bond_ind
ltags, rtags = tags2set(ltags) | T.tags, tags2set(rtags) | T.tags
Tl = Tensor(data=left, inds=(*left_inds, bond_ind), tags=ltags)
Tr = Tensor(data=right, inds=(bond_ind, *right_inds), tags=rtags)
if get == 'tensors':
return Tl, Tr
return TensorNetwork((Tl, Tr), check_collisions=False)
def tensor_compress_bond(T1, T2, **compress_opts):
r"""Inplace compress between the two single tensors. It follows the
following steps to minimize the size of SVD performed::
a)| | b)| | c)| |
--1---2-- -> --1L~~1R--2L~~2R-- -> --1L~~M~~2R--
| | | ...... | | |
<*> <*> > < <*>
d)| | e)| |
-> --1L~~ML~~MR~~2R-- -> --1C~~~2C--
|.... ....| | |
> < > < ^compressed bond
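    Examples
    --------
    A rough sketch - an artificially large bond is compressed down to (at
    most) the rank supported by the outer dimensions:
    >>> T1 = rand_tensor((3, 10), inds=('a', 'bond'))
    >>> T2 = rand_tensor((10, 3), inds=('bond', 'b'))
    >>> tensor_compress_bond(T1, T2)
    >>> T1.ind_size('bond') <= 3
    True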
"""
s_ix, t1_ix = T1.filter_bonds(T2)
if not s_ix:
raise ValueError("The tensors specified don't share an bond.")
# a) -> b)
T1_L, T1_R = T1.split(left_inds=t1_ix, get='tensors',
absorb='right', **compress_opts)
T2_L, T2_R = T2.split(left_inds=s_ix, get='tensors',
absorb='left', **compress_opts)
# b) -> c)
M = (T1_R @ T2_L)
M.drop_tags()
# c) -> d)
M_L, M_R = M.split(left_inds=T1_L.bonds(M), get='tensors',
absorb='both', **compress_opts)
    # make sure the old bond name is kept
ns_ix, = M_L.bonds(M_R)
M_L.reindex_({ns_ix: s_ix[0]})
M_R.reindex_({ns_ix: s_ix[0]})
# d) -> e)
T1C = T1_L.contract(M_L, output_inds=T1.inds)
T2C = M_R.contract(T2_R, output_inds=T2.inds)
# update with the new compressed data
T1.modify(data=T1C.data)
T2.modify(data=T2C.data)
def tensor_add_bond(T1, T2):
"""Inplace addition of a dummy bond between ``T1`` and ``T2``.
"""
bnd = rand_uuid()
T1.modify(data=do('expand_dims', T1.data, -1), inds=(*T1.inds, bnd))
T2.modify(data=do('expand_dims', T2.data, -1), inds=(*T2.inds, bnd))
def array_direct_product(X, Y, sum_axes=()):
"""Direct product of two numpy.ndarrays.
Parameters
----------
X : numpy.ndarray
First tensor.
Y : numpy.ndarray
Second tensor, same shape as ``X``.
sum_axes : sequence of int
Axes to sum over rather than direct product, e.g. physical indices when
adding tensor networks.
Returns
-------
Z : numpy.ndarray
Same shape as ``X`` and ``Y``, but with every dimension the sum of the
two respective dimensions, unless it is included in ``sum_axes``.
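    Examples
    --------
    A minimal sketch with plain numpy arrays - the summed axis keeps its
    size while the other axis doubles:
    >>> X = np.ones((2, 3))
    >>> Y = np.ones((2, 3))
    >>> array_direct_product(X, Y, sum_axes=0).shape
    (2, 6)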
"""
if isinstance(sum_axes, Integral):
sum_axes = (sum_axes,)
    # parse the intermediate and final shape, doubling the size of any axis
    # that is not to be summed, and preparing slices with which to add X, Y.
final_shape = []
selectorX = []
selectorY = []
for i, (d1, d2) in enumerate(zip(X.shape, Y.shape)):
if i not in sum_axes:
final_shape.append(d1 + d2)
selectorX.append(slice(0, d1))
selectorY.append(slice(d1, None))
else:
if d1 != d2:
raise ValueError("Can only add sum tensor indices of the same "
"size.")
final_shape.append(d1)
selectorX.append(slice(None))
selectorY.append(slice(None))
new_type = common_type(X, Y)
Z = np.zeros(final_shape, dtype=new_type)
# Add tensors to the diagonals
Z[tuple(selectorX)] += X
Z[tuple(selectorY)] += Y
return Z
def tensor_direct_product(T1, T2, sum_inds=(), inplace=False):
"""Direct product of two Tensors. Any axes included in ``sum_inds`` must be
the same size and will be summed over rather than concatenated. Summing
over contractions of TensorNetworks equates to contracting a TensorNetwork
made of direct products of each set of tensors. I.e. (a1 @ b1) + (a2 @ b2)
== (a1 (+) a2) @ (b1 (+) b2).
Parameters
----------
T1 : Tensor
The first tensor.
T2 : Tensor
The second tensor, with matching indices and dimensions to ``T1``.
sum_inds : sequence of str, optional
Axes to sum over rather than combine, e.g. physical indices when
adding tensor networks.
inplace : bool, optional
Whether to modify ``T1`` inplace.
Returns
-------
Tensor
Like ``T1``, but with each dimension doubled in size if not
in ``sum_inds``.
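    Examples
    --------
    An illustrative sketch - 'a' is summed over (as a physical index might
    be) while 'b' doubles in size:
    >>> T1 = rand_tensor((2, 3), inds=('a', 'b'))
    >>> T2 = rand_tensor((2, 3), inds=('a', 'b'))
    >>> tensor_direct_product(T1, T2, sum_inds='a')
    Tensor(shape=(2, 6), inds=('a', 'b'), tags=set())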
"""
if isinstance(sum_inds, (str, Integral)):
sum_inds = (sum_inds,)
if T2.inds != T1.inds:
T2 = T2.transpose(*T1.inds)
sum_axes = tuple(T1.inds.index(ind) for ind in sum_inds)
if inplace:
new_T = T1
else:
new_T = T1.copy()
# XXX: add T2s tags?
new_T.modify(data=array_direct_product(T1.data, T2.data,
sum_axes=sum_axes))
return new_T
def bonds(t1, t2):
"""Getting any indices connecting the Tensor(s) or TensorNetwork(s) ``t1``
and ``t2``.
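    Examples
    --------
    A minimal sketch:
    >>> A = Tensor(randn((2, 3)), inds=['a', 'b'])
    >>> B = Tensor(randn((3, 4)), inds=['b', 'c'])
    >>> bonds(A, B)
    {'b'}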
"""
if isinstance(t1, Tensor):
ix1 = set(t1.inds)
else:
ix1 = set(concat(t.inds for t in t1))
if isinstance(t2, Tensor):
ix2 = set(t2.inds)
else:
ix2 = set(concat(t.inds for t in t2))
return ix1 & ix2
def bonds_size(t1, t2):
"""Get the size of the bonds linking tensors or tensor networks ``t1`` and
``t2``.
"""
return prod(t1.ind_size(ix) for ix in bonds(t1, t2))
def connect(t1, t2, ax1, ax2):
"""Connect two tensors by setting a shared index for the specified
dimensions. This is an inplace operation that will also affect any tensor
networks viewing these tensors.
Parameters
----------
t1 : Tensor
The first tensor.
    t2 : Tensor
The second tensor.
ax1 : int
The dimension (axis) to connect on the first tensor.
ax2 : int
The dimension (axis) to connect on the second tensor.
Examples
--------
>>> X = rand_tensor([2, 3], inds=['a', 'b'])
>>> Y = rand_tensor([3, 4], inds=['c', 'd'])
>>> tn = (X | Y) # is *view* of tensors (``&`` would copy them)
>>> print(tn)
TensorNetwork([
Tensor(shape=(2, 3), inds=('a', 'b'), tags=set()),
Tensor(shape=(3, 4), inds=('c', 'd'), tags=set()),
])
>>> connect(X, Y, 1, 0) # modifies tensors *and* viewing TN
>>> print(tn)
TensorNetwork([
Tensor(shape=(2, 3), inds=('a', '_e9021e0000002'), tags=set()),
Tensor(shape=(3, 4), inds=('_e9021e0000002', 'd'), tags=set()),
])
>>> tn ^ all
Tensor(shape=(2, 4), inds=('a', 'd'), tags=set())
"""
d1, d2 = t1.shape[ax1], t2.shape[ax2]
if d1 != d2:
raise ValueError("Index sizes don't match: {} != {}.".format(d1, d2))
new_ind = rand_uuid()
ind1 = t1.inds[ax1]
ind2 = t2.inds[ax2]
t1.reindex_({ind1: new_ind})
t2.reindex_({ind2: new_ind})
def get_tags(ts):
"""Return all the tags in found in ``ts``.
Parameters
----------
ts : Tensor, TensorNetwork or sequence of either
The objects to combine tags from.
"""
if isinstance(ts, (TensorNetwork, Tensor)):
ts = (ts,)
return set().union(*[t.tags for t in ts])
def tags2set(tags):
"""Parse a ``tags`` argument into a set - leave if already one.
"""
if isinstance(tags, set):
return tags
elif tags is None:
return set()
elif isinstance(tags, str):
return {tags}
else:
return set(tags)
# --------------------------------------------------------------------------- #
# Tensor Class #
# --------------------------------------------------------------------------- #
class Tensor(object):
"""A labelled, tagged ndarray. The index labels are used instead of
axis numbers to identify dimensions, and are preserved through operations.
Parameters
----------
data : numpy.ndarray
The n-dimensional data.
inds : sequence of str
The index labels for each dimension. Must match the number of
dimensions of ``data``.
tags : sequence of str, optional
Tags with which to identify and group this tensor. These will
be converted into a ``set``.
left_inds : sequence of str, optional
Which, if any, indices to group as 'left' indices of an effective
matrix. This can be useful, for example, when automatically applying
unitary constraints to impose a certain flow on a tensor network but at
the atomistic (Tensor) level.
Examples
--------
Basic construction:
>>> from quimb import randn
>>> from quimb.tensor import Tensor
>>> X = Tensor(randn((2, 3, 4)), inds=['a', 'b', 'c'], tags={'X'})
>>> Y = Tensor(randn((3, 4, 5)), inds=['b', 'c', 'd'], tags={'Y'})
Indices are automatically aligned, and tags combined, when contracting:
>>> X @ Y
Tensor(shape=(2, 5), inds=('a', 'd'), tags={'Y', 'X'})
"""
def __init__(self, data, inds, tags=None, left_inds=None):
# a new or copied Tensor always has no owners
self.owners = {}
# Short circuit for copying Tensors
if isinstance(data, Tensor):
self._data = data.data
self._inds = data.inds
self._tags = data.tags.copy()
self._left_inds = data.left_inds
return
self._data = asarray(data)
self._inds = tuple(inds)
self._tags = tags2set(tags)
self._left_inds = tuple(left_inds) if left_inds is not None else None
nd = ndim(self._data)
if nd != len(self.inds):
raise ValueError(
"Wrong number of inds, {}, supplied for array"
" of shape {}.".format(self.inds, self._data.shape))
if self.left_inds and any(i not in self.inds for i in self.left_inds):
raise ValueError(
"The 'left' indices {} are not found in {}."
"".format(self.left_inds, self.inds))
def copy(self, deep=False):
"""Copy this tensor. Note by default (``deep=False``), the underlying
array will *not* be copied.
"""
if deep:
return copy.deepcopy(self)
else:
return Tensor(self, None)
__copy__ = copy
@property
def data(self):
return self._data
@property
def inds(self):
return self._inds
@property
def tags(self):
return self._tags
@property
def left_inds(self):
return self._left_inds
@left_inds.setter
def left_inds(self, left_inds):
self._left_inds = tuple(left_inds) if left_inds is not None else None
def add_owner(self, tn, tid):
"""Add ``tn`` as owner of this Tensor - it's tag and ind maps will
be updated whenever this tensor is retagged or reindexed.
"""
self.owners[hash(tn)] = (weakref.ref(tn), tid)
def remove_owner(self, tn):
"""Remove TensorNetwork ``tn`` as an owner of this Tensor.
"""
try:
del self.owners[hash(tn)]
except KeyError:
pass
def check_owners(self):
"""Check if this tensor is 'owned' by any alive TensorNetworks. Also
trim any weakrefs to dead TensorNetworks.
"""
# first parse out dead owners
for k in tuple(self.owners):
if not self.owners[k][0]():
del self.owners[k]
return len(self.owners) > 0
def modify(self, **kwargs):
"""Overwrite the data of this tensor in place.
Parameters
----------
data : array, optional
New data.
inds : sequence of str, optional
New tuple of indices.
        tags : sequence of str, optional
            New tags.
        left_inds : sequence of str, optional
            New grouping of indices to be 'on the left'.
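        Examples
        --------
        A small sketch - relabelling the indices in place:
        >>> t = rand_tensor((2, 2), inds=('a', 'b'))
        >>> t.modify(inds=('x', 'y'))
        >>> t.inds
        ('x', 'y')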
"""
if 'data' in kwargs:
self._data = asarray(kwargs.pop('data'))
if 'inds' in kwargs:
inds = tuple(kwargs.pop('inds'))
# if this tensor has owners, update their ``ind_map``.
if self.check_owners():
for ref, tid in self.owners.values():
ref()._modify_tensor_inds(self.inds, inds, tid)
self._inds = inds
if 'tags' in kwargs:
tags = tags2set(kwargs.pop('tags'))
# if this tensor has owners, update their ``tag_map``.
if self.check_owners():
for ref, tid in self.owners.values():
ref()._modify_tensor_tags(self.tags, tags, tid)
self._tags = tags
if 'left_inds' in kwargs:
self.left_inds = kwargs.pop('left_inds')
if kwargs:
raise ValueError("Option(s) {} not valid.".format(kwargs))
if len(self.inds) != ndim(self.data):
raise ValueError("Mismatch between number of data dimensions and "
"number of indices supplied.")
if self.left_inds and any(i not in self.inds for i in self.left_inds):
raise ValueError(
"The 'left' indices {} are not found in {}."
"".format(self.left_inds, self.inds))
def isel(self, selectors, inplace=False):
"""Select specific values for some dimensions/indices of this tensor,
thereby removing them. Analogous to ``X[:, :, 3, :, :]`` with arrays.
Parameters
----------
selectors : dict[str, int]
Mapping of index(es) to which value to take.
inplace : bool, optional
Whether to select inplace or not.
Returns
-------
Tensor
Examples
--------
>>> T = rand_tensor((2, 3, 4), inds=('a', 'b', 'c'))
>>> T.isel({'b': -1})
Tensor(shape=(2, 4), inds=('a', 'c'), tags=set())
See Also
--------
TensorNetwork.isel
"""
T = self if inplace else self.copy()
new_inds = tuple(ix for ix in self.inds if ix not in selectors)
data_loc = tuple(selectors.get(ix, slice(None)) for ix in self.inds)
new_data = self.data[data_loc]
T.modify(data=new_data, inds=new_inds)
return T
isel_ = functools.partialmethod(isel, inplace=True)
def add_tag(self, tag):
"""Add a tag to this tensor. Unlike ``self.tags.add`` this also updates
any TensorNetworks viewing this Tensor.
"""
self.modify(tags=set.union(self.tags, {tag}))
def conj(self, inplace=False):
"""Conjugate this tensors data (does nothing to indices).
"""
conj_data = conj(self.data)
if inplace:
self._data = conj_data
return self
else:
return Tensor(conj_data, self.inds, self.tags)
conj_ = functools.partialmethod(conj, inplace=True)
@property
def H(self):
"""Conjugate this tensors data (does nothing to indices).
"""
return self.conj()
@property
def shape(self):
return self._data.shape
@property
def ndim(self):
return ndim(self._data)
@property
def size(self):
return self._data.size
@property
def dtype(self):
return self._data.dtype
def isreal(self):
return not iscomplex(self.data)
def iscomplex(self):
return iscomplex(self.data)
def astype(self, dtype, inplace=False):
"""Change the type of this tensor to ``dtype``.
"""
T = self if inplace else self.copy()
T.modify(data=self.data.astype(dtype))
return T
astype_ = functools.partialmethod(astype, inplace=True)
def ind_size(self, ind):
"""Return the size of dimension corresponding to ``ind``.
"""
return self.shape[self.inds.index(ind)]
def shared_bond_size(self, other):
"""Get the total size of the shared index(es) with ``other``.
"""
return bonds_size(self, other)
def inner_inds(self):
"""
"""
ind_freqs = frequencies(self.inds)
return tuple(i for i in self.inds if ind_freqs[i] == 2)
def transpose(self, *output_inds, inplace=False):
"""Transpose this tensor.
Parameters